author     Dimitry Andric <dim@FreeBSD.org>    2015-01-18 16:23:48 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2015-01-18 16:23:48 +0000
commit     06d4ba388873e6d1cfa9cd715a8935ecc8cd2097 (patch)
tree       3eb853da77d46cc77c4b017525a422f9ddb1385b /lib
parent     30d791273d07fac9c0c1641a0731191bca6e8606 (diff)
Vendor import of clang RELEASE_360/rc1 tag r226102 (effectively, 3.6.0 RC1)
Tag: vendor/clang/clang-release_360-r226102
Notes:
    svn path=/vendor/clang/dist/; revision=277325
    svn path=/vendor/clang/clang-release_360-r226102/; revision=277326; tag=vendor/clang/clang-release_360-r226102
Diffstat (limited to 'lib')
-rw-r--r--lib/ARCMigrate/ARCMT.cpp15
-rw-r--r--lib/ARCMigrate/FileRemapper.cpp34
-rw-r--r--lib/ARCMigrate/Internals.h2
-rw-r--r--lib/ARCMigrate/ObjCMT.cpp286
-rw-r--r--lib/ARCMigrate/PlistReporter.cpp6
-rw-r--r--lib/ARCMigrate/TransformActions.cpp3
-rw-r--r--lib/AST/APValue.cpp4
-rw-r--r--lib/AST/ASTContext.cpp419
-rw-r--r--lib/AST/ASTDiagnostic.cpp431
-rw-r--r--lib/AST/ASTDumper.cpp1144
-rw-r--r--lib/AST/ASTImporter.cpp124
-rw-r--r--lib/AST/ASTTypeTraits.cpp47
-rw-r--r--lib/AST/CMakeLists.txt1
-rw-r--r--lib/AST/CXXABI.h4
-rw-r--r--lib/AST/Comment.cpp17
-rw-r--r--lib/AST/CommentCommandTraits.cpp4
-rw-r--r--lib/AST/CommentLexer.cpp4
-rw-r--r--lib/AST/Decl.cpp159
-rw-r--r--lib/AST/DeclBase.cpp28
-rw-r--r--lib/AST/DeclCXX.cpp94
-rw-r--r--lib/AST/DeclObjC.cpp66
-rw-r--r--lib/AST/DeclPrinter.cpp57
-rw-r--r--lib/AST/Expr.cpp205
-rw-r--r--lib/AST/ExprCXX.cpp16
-rw-r--r--lib/AST/ExprClassification.cpp11
-rw-r--r--lib/AST/ExprConstant.cpp579
-rw-r--r--lib/AST/ItaniumCXXABI.cpp54
-rw-r--r--lib/AST/ItaniumMangle.cpp187
-rw-r--r--lib/AST/Mangle.cpp92
-rw-r--r--lib/AST/MangleNumberingContext.cpp45
-rw-r--r--lib/AST/MicrosoftCXXABI.cpp21
-rw-r--r--lib/AST/MicrosoftMangle.cpp174
-rw-r--r--lib/AST/NSAPI.cpp31
-rw-r--r--lib/AST/NestedNameSpecifier.cpp71
-rw-r--r--lib/AST/RecordLayoutBuilder.cpp120
-rw-r--r--lib/AST/Stmt.cpp454
-rw-r--r--lib/AST/StmtPrinter.cpp91
-rw-r--r--lib/AST/StmtProfile.cpp76
-rw-r--r--lib/AST/TemplateBase.cpp28
-rw-r--r--lib/AST/Type.cpp143
-rw-r--r--lib/AST/TypeLoc.cpp8
-rw-r--r--lib/AST/TypePrinter.cpp19
-rw-r--r--lib/AST/VTTBuilder.cpp4
-rw-r--r--lib/AST/VTableBuilder.cpp334
-rw-r--r--lib/ASTMatchers/ASTMatchFinder.cpp289
-rw-r--r--lib/ASTMatchers/ASTMatchersInternal.cpp268
-rw-r--r--lib/ASTMatchers/Dynamic/Marshallers.h105
-rw-r--r--lib/ASTMatchers/Dynamic/Parser.cpp105
-rw-r--r--lib/ASTMatchers/Dynamic/Registry.cpp171
-rw-r--r--lib/ASTMatchers/Dynamic/VariantValue.cpp164
-rw-r--r--lib/Analysis/AnalysisDeclContext.cpp29
-rw-r--r--lib/Analysis/BodyFarm.cpp12
-rw-r--r--lib/Analysis/BodyFarm.h8
-rw-r--r--lib/Analysis/CFG.cpp354
-rw-r--r--lib/Analysis/CMakeLists.txt1
-rw-r--r--lib/Analysis/CallGraph.cpp13
-rw-r--r--lib/Analysis/CodeInjector.cpp15
-rw-r--r--lib/Analysis/FormatString.cpp38
-rw-r--r--lib/Analysis/FormatStringParsing.h4
-rw-r--r--lib/Analysis/LiveVariables.cpp1
-rw-r--r--lib/Analysis/PrintfFormatString.cpp78
-rw-r--r--lib/Analysis/ReachableCode.cpp4
-rw-r--r--lib/Analysis/ScanfFormatString.cpp11
-rw-r--r--lib/Analysis/ThreadSafety.cpp1436
-rw-r--r--lib/Analysis/ThreadSafetyCommon.cpp335
-rw-r--r--lib/Analysis/ThreadSafetyTIL.cpp280
-rw-r--r--lib/Analysis/UninitializedValues.cpp32
-rw-r--r--lib/Basic/Attributes.cpp4
-rw-r--r--lib/Basic/CMakeLists.txt80
-rw-r--r--lib/Basic/Diagnostic.cpp42
-rw-r--r--lib/Basic/DiagnosticIDs.cpp31
-rw-r--r--lib/Basic/FileManager.cpp192
-rw-r--r--lib/Basic/FileSystemStatCache.cpp9
-rw-r--r--lib/Basic/IdentifierTable.cpp119
-rw-r--r--lib/Basic/LangOptions.cpp8
-rw-r--r--lib/Basic/Module.cpp58
-rw-r--r--lib/Basic/OpenMPKinds.cpp82
-rw-r--r--lib/Basic/SanitizerBlacklist.cpp46
-rw-r--r--lib/Basic/Sanitizers.cpp35
-rw-r--r--lib/Basic/SourceLocation.cpp8
-rw-r--r--lib/Basic/SourceManager.cpp67
-rw-r--r--lib/Basic/TargetInfo.cpp37
-rw-r--r--lib/Basic/Targets.cpp1116
-rw-r--r--lib/Basic/Version.cpp2
-rw-r--r--lib/Basic/VersionTuple.cpp4
-rw-r--r--lib/Basic/VirtualFileSystem.cpp100
-rw-r--r--lib/CodeGen/ABIInfo.h23
-rw-r--r--lib/CodeGen/BackendUtil.cpp88
-rw-r--r--lib/CodeGen/CGAtomic.cpp328
-rw-r--r--lib/CodeGen/CGBlocks.cpp48
-rw-r--r--lib/CodeGen/CGBlocks.h4
-rw-r--r--lib/CodeGen/CGBuilder.h6
-rw-r--r--lib/CodeGen/CGBuiltin.cpp530
-rw-r--r--lib/CodeGen/CGCUDARuntime.cpp3
-rw-r--r--lib/CodeGen/CGCUDARuntime.h4
-rw-r--r--lib/CodeGen/CGCXX.cpp157
-rw-r--r--lib/CodeGen/CGCXXABI.cpp23
-rw-r--r--lib/CodeGen/CGCXXABI.h102
-rw-r--r--lib/CodeGen/CGCall.cpp1512
-rw-r--r--lib/CodeGen/CGCall.h4
-rw-r--r--lib/CodeGen/CGClass.cpp263
-rw-r--r--lib/CodeGen/CGCleanup.cpp12
-rw-r--r--lib/CodeGen/CGCleanup.h12
-rw-r--r--lib/CodeGen/CGDebugInfo.cpp1569
-rw-r--r--lib/CodeGen/CGDebugInfo.h136
-rw-r--r--lib/CodeGen/CGDecl.cpp141
-rw-r--r--lib/CodeGen/CGDeclCXX.cpp154
-rw-r--r--lib/CodeGen/CGException.cpp106
-rw-r--r--lib/CodeGen/CGExpr.cpp547
-rw-r--r--lib/CodeGen/CGExprCXX.cpp301
-rw-r--r--lib/CodeGen/CGExprComplex.cpp271
-rw-r--r--lib/CodeGen/CGExprConstant.cpp202
-rw-r--r--lib/CodeGen/CGExprScalar.cpp240
-rw-r--r--lib/CodeGen/CGLoopInfo.cpp27
-rw-r--r--lib/CodeGen/CGLoopInfo.h6
-rw-r--r--lib/CodeGen/CGObjC.cpp43
-rw-r--r--lib/CodeGen/CGObjCGNU.cpp31
-rw-r--r--lib/CodeGen/CGObjCMac.cpp424
-rw-r--r--lib/CodeGen/CGObjCRuntime.h4
-rw-r--r--lib/CodeGen/CGOpenCLRuntime.h4
-rw-r--r--lib/CodeGen/CGOpenMPRuntime.cpp794
-rw-r--r--lib/CodeGen/CGOpenMPRuntime.h315
-rw-r--r--lib/CodeGen/CGRecordLayout.h4
-rw-r--r--lib/CodeGen/CGRecordLayoutBuilder.cpp59
-rw-r--r--lib/CodeGen/CGStmt.cpp196
-rw-r--r--lib/CodeGen/CGStmtOpenMP.cpp639
-rw-r--r--lib/CodeGen/CGVTables.cpp126
-rw-r--r--lib/CodeGen/CGVTables.h4
-rw-r--r--lib/CodeGen/CGValue.h4
-rw-r--r--lib/CodeGen/CMakeLists.txt15
-rw-r--r--lib/CodeGen/CodeGenABITypes.cpp9
-rw-r--r--lib/CodeGen/CodeGenAction.cpp89
-rw-r--r--lib/CodeGen/CodeGenFunction.cpp232
-rw-r--r--lib/CodeGen/CodeGenFunction.h396
-rw-r--r--lib/CodeGen/CodeGenModule.cpp668
-rw-r--r--lib/CodeGen/CodeGenModule.h194
-rw-r--r--lib/CodeGen/CodeGenPGO.cpp364
-rw-r--r--lib/CodeGen/CodeGenPGO.h48
-rw-r--r--lib/CodeGen/CodeGenTBAA.h4
-rw-r--r--lib/CodeGen/CodeGenTypes.cpp30
-rw-r--r--lib/CodeGen/CodeGenTypes.h140
-rw-r--r--lib/CodeGen/CoverageMappingGen.cpp1174
-rw-r--r--lib/CodeGen/CoverageMappingGen.h114
-rw-r--r--lib/CodeGen/EHScopeStack.h6
-rw-r--r--lib/CodeGen/ItaniumCXXABI.cpp400
-rw-r--r--lib/CodeGen/MicrosoftCXXABI.cpp457
-rw-r--r--lib/CodeGen/ModuleBuilder.cpp66
-rw-r--r--lib/CodeGen/SanitizerBlacklist.cpp52
-rw-r--r--lib/CodeGen/SanitizerBlacklist.h46
-rw-r--r--lib/CodeGen/SanitizerMetadata.cpp92
-rw-r--r--lib/CodeGen/SanitizerMetadata.h53
-rw-r--r--lib/CodeGen/TargetInfo.cpp1110
-rw-r--r--lib/CodeGen/TargetInfo.h22
-rw-r--r--lib/Driver/Action.cpp74
-rw-r--r--lib/Driver/CMakeLists.txt3
-rw-r--r--lib/Driver/Compilation.cpp19
-rw-r--r--lib/Driver/CrossWindowsToolChain.cpp117
-rw-r--r--lib/Driver/Driver.cpp506
-rw-r--r--lib/Driver/InputInfo.h4
-rw-r--r--lib/Driver/Job.cpp187
-rw-r--r--lib/Driver/MSVCToolChain.cpp496
-rw-r--r--lib/Driver/Multilib.cpp2
-rw-r--r--lib/Driver/Phases.cpp1
-rw-r--r--lib/Driver/SanitizerArgs.cpp524
-rw-r--r--lib/Driver/Tool.cpp12
-rw-r--r--lib/Driver/ToolChain.cpp36
-rw-r--r--lib/Driver/ToolChains.cpp594
-rw-r--r--lib/Driver/ToolChains.h93
-rw-r--r--lib/Driver/Tools.cpp1590
-rw-r--r--lib/Driver/Tools.h180
-rw-r--r--lib/Driver/Types.cpp2
-rw-r--r--lib/Driver/WindowsToolChain.cpp338
-rw-r--r--lib/Edit/EditedSource.cpp6
-rw-r--r--lib/Edit/RewriteObjCFoundationAPI.cpp3
-rw-r--r--lib/Format/BreakableToken.cpp3
-rw-r--r--lib/Format/BreakableToken.h11
-rw-r--r--lib/Format/CMakeLists.txt3
-rw-r--r--lib/Format/ContinuationIndenter.cpp465
-rw-r--r--lib/Format/ContinuationIndenter.h34
-rw-r--r--lib/Format/Encoding.h6
-rw-r--r--lib/Format/Format.cpp975
-rw-r--r--lib/Format/FormatToken.cpp24
-rw-r--r--lib/Format/FormatToken.h150
-rw-r--r--lib/Format/TokenAnnotator.cpp1041
-rw-r--r--lib/Format/TokenAnnotator.h22
-rw-r--r--lib/Format/UnwrappedLineFormatter.cpp706
-rw-r--r--lib/Format/UnwrappedLineFormatter.h168
-rw-r--r--lib/Format/UnwrappedLineParser.cpp244
-rw-r--r--lib/Format/UnwrappedLineParser.h14
-rw-r--r--lib/Format/WhitespaceManager.cpp12
-rw-r--r--lib/Format/WhitespaceManager.h6
-rw-r--r--lib/Frontend/ASTConsumers.cpp40
-rw-r--r--lib/Frontend/ASTMerge.cpp10
-rw-r--r--lib/Frontend/ASTUnit.cpp355
-rw-r--r--lib/Frontend/CMakeLists.txt3
-rw-r--r--lib/Frontend/CacheTokens.cpp16
-rw-r--r--lib/Frontend/ChainedIncludesSource.cpp33
-rw-r--r--lib/Frontend/CodeGenOptions.cpp24
-rw-r--r--lib/Frontend/CompilerInstance.cpp268
-rw-r--r--lib/Frontend/CompilerInvocation.cpp207
-rw-r--r--lib/Frontend/CreateInvocationFromCommandLine.cpp6
-rw-r--r--lib/Frontend/DependencyFile.cpp36
-rw-r--r--lib/Frontend/DependencyGraph.cpp13
-rw-r--r--lib/Frontend/DiagnosticRenderer.cpp4
-rw-r--r--lib/Frontend/FrontendAction.cpp80
-rw-r--r--lib/Frontend/FrontendActions.cpp168
-rw-r--r--lib/Frontend/HeaderIncludeGen.cpp20
-rw-r--r--lib/Frontend/InitHeaderSearch.cpp16
-rw-r--r--lib/Frontend/InitPreprocessor.cpp166
-rw-r--r--lib/Frontend/LogDiagnosticPrinter.cpp26
-rw-r--r--lib/Frontend/ModuleDependencyCollector.cpp43
-rw-r--r--lib/Frontend/MultiplexConsumer.cpp117
-rw-r--r--lib/Frontend/PrintPreprocessedOutput.cpp5
-rw-r--r--lib/Frontend/Rewrite/FixItRewriter.cpp26
-rw-r--r--lib/Frontend/Rewrite/FrontendActions.cpp14
-rw-r--r--lib/Frontend/Rewrite/HTMLPrint.cpp11
-rw-r--r--lib/Frontend/Rewrite/InclusionRewriter.cpp153
-rw-r--r--lib/Frontend/Rewrite/RewriteModernObjC.cpp102
-rw-r--r--lib/Frontend/Rewrite/RewriteObjC.cpp102
-rw-r--r--lib/Frontend/SerializedDiagnosticPrinter.cpp257
-rw-r--r--lib/Frontend/SerializedDiagnosticReader.cpp295
-rw-r--r--lib/Frontend/TextDiagnostic.cpp24
-rw-r--r--lib/Frontend/VerifyDiagnosticConsumer.cpp69
-rw-r--r--lib/FrontendTool/ExecuteCompilerInvocation.cpp2
-rw-r--r--lib/Headers/CMakeLists.txt23
-rw-r--r--lib/Headers/Intrin.h88
-rw-r--r--lib/Headers/__stddef_max_align_t.h40
-rw-r--r--lib/Headers/adxintrin.h83
-rw-r--r--lib/Headers/altivec.h411
-rw-r--r--lib/Headers/arm_acle.h113
-rw-r--r--lib/Headers/avx512bwintrin.h60
-rw-r--r--lib/Headers/avx512erintrin.h112
-rw-r--r--lib/Headers/avx512fintrin.h1036
-rw-r--r--lib/Headers/avx512vlbwintrin.h83
-rw-r--r--lib/Headers/avx512vlintrin.h83
-rw-r--r--lib/Headers/bmiintrin.h6
-rw-r--r--lib/Headers/cpuid.h84
-rw-r--r--lib/Headers/emmintrin.h48
-rw-r--r--lib/Headers/float.h2
-rw-r--r--lib/Headers/immintrin.h76
-rw-r--r--lib/Headers/lzcntintrin.h18
-rw-r--r--lib/Headers/module.modulemap22
-rw-r--r--lib/Headers/shaintrin.h12
-rw-r--r--lib/Headers/stdatomic.h190
-rw-r--r--lib/Headers/stddef.h29
-rw-r--r--lib/Headers/unwind.h6
-rw-r--r--lib/Headers/vadefs.h65
-rw-r--r--lib/Headers/xmmintrin.h48
-rw-r--r--lib/Index/CMakeLists.txt3
-rw-r--r--lib/Index/CommentToXML.cpp10
-rw-r--r--lib/Index/SimpleFormatContext.h9
-rw-r--r--lib/Index/USRGeneration.cpp54
-rw-r--r--lib/Lex/HeaderMap.cpp10
-rw-r--r--lib/Lex/HeaderSearch.cpp302
-rw-r--r--lib/Lex/Lexer.cpp48
-rw-r--r--lib/Lex/LiteralSupport.cpp16
-rw-r--r--lib/Lex/MacroArgs.cpp5
-rw-r--r--lib/Lex/ModuleMap.cpp408
-rw-r--r--lib/Lex/PPDirectives.cpp339
-rw-r--r--lib/Lex/PPExpressions.cpp22
-rw-r--r--lib/Lex/PPLexerChange.cpp68
-rw-r--r--lib/Lex/PPMacroExpansion.cpp357
-rw-r--r--lib/Lex/PTHLexer.cpp74
-rw-r--r--lib/Lex/Pragma.cpp23
-rw-r--r--lib/Lex/Preprocessor.cpp114
-rw-r--r--lib/Lex/ScratchBuffer.cpp9
-rw-r--r--lib/Lex/TokenConcatenation.cpp20
-rw-r--r--lib/Lex/TokenLexer.cpp29
-rw-r--r--lib/Lex/UnicodeCharSets.h4
-rw-r--r--lib/Parse/ParseAST.cpp7
-rw-r--r--lib/Parse/ParseCXXInlineMethods.cpp203
-rw-r--r--lib/Parse/ParseDecl.cpp460
-rw-r--r--lib/Parse/ParseDeclCXX.cpp362
-rw-r--r--lib/Parse/ParseExpr.cpp260
-rw-r--r--lib/Parse/ParseExprCXX.cpp198
-rw-r--r--lib/Parse/ParseInit.cpp6
-rw-r--r--lib/Parse/ParseObjc.cpp164
-rw-r--r--lib/Parse/ParseOpenMP.cpp71
-rw-r--r--lib/Parse/ParsePragma.cpp313
-rw-r--r--lib/Parse/ParseStmt.cpp83
-rw-r--r--lib/Parse/ParseStmtAsm.cpp61
-rw-r--r--lib/Parse/ParseTemplate.cpp25
-rw-r--r--lib/Parse/ParseTentative.cpp37
-rw-r--r--lib/Parse/Parser.cpp127
-rw-r--r--lib/Parse/RAIIObjectsForParser.h4
-rw-r--r--lib/Rewrite/CMakeLists.txt1
-rw-r--r--lib/Rewrite/RewriteRope.cpp14
-rw-r--r--lib/Rewrite/Rewriter.cpp33
-rw-r--r--lib/Sema/AnalysisBasedWarnings.cpp191
-rw-r--r--lib/Sema/AttributeList.cpp8
-rw-r--r--lib/Sema/CMakeLists.txt1
-rw-r--r--lib/Sema/DeclSpec.cpp49
-rw-r--r--lib/Sema/IdentifierResolver.cpp3
-rw-r--r--lib/Sema/JumpDiagnostics.cpp22
-rw-r--r--lib/Sema/MultiplexExternalSemaSource.cpp6
-rw-r--r--lib/Sema/Scope.cpp9
-rw-r--r--lib/Sema/ScopeInfo.cpp19
-rw-r--r--lib/Sema/Sema.cpp52
-rw-r--r--lib/Sema/SemaAccess.cpp4
-rw-r--r--lib/Sema/SemaAttr.cpp26
-rw-r--r--lib/Sema/SemaCUDA.cpp263
-rw-r--r--lib/Sema/SemaCXXScopeSpec.cpp65
-rw-r--r--lib/Sema/SemaCast.cpp54
-rw-r--r--lib/Sema/SemaChecking.cpp716
-rw-r--r--lib/Sema/SemaCodeComplete.cpp111
-rw-r--r--lib/Sema/SemaDecl.cpp784
-rw-r--r--lib/Sema/SemaDeclAttr.cpp823
-rw-r--r--lib/Sema/SemaDeclCXX.cpp1207
-rw-r--r--lib/Sema/SemaDeclObjC.cpp184
-rw-r--r--lib/Sema/SemaExceptionSpec.cpp117
-rw-r--r--lib/Sema/SemaExpr.cpp798
-rw-r--r--lib/Sema/SemaExprCXX.cpp411
-rw-r--r--lib/Sema/SemaExprMember.cpp132
-rw-r--r--lib/Sema/SemaExprObjC.cpp172
-rw-r--r--lib/Sema/SemaInit.cpp198
-rw-r--r--lib/Sema/SemaLambda.cpp27
-rw-r--r--lib/Sema/SemaLookup.cpp759
-rw-r--r--lib/Sema/SemaObjCProperty.cpp52
-rw-r--r--lib/Sema/SemaOpenMP.cpp1694
-rw-r--r--lib/Sema/SemaOverload.cpp616
-rw-r--r--lib/Sema/SemaPseudoObject.cpp9
-rw-r--r--lib/Sema/SemaStmt.cpp448
-rw-r--r--lib/Sema/SemaStmtAsm.cpp200
-rw-r--r--lib/Sema/SemaStmtAttr.cpp128
-rw-r--r--lib/Sema/SemaTemplate.cpp244
-rw-r--r--lib/Sema/SemaTemplateDeduction.cpp65
-rw-r--r--lib/Sema/SemaTemplateInstantiate.cpp248
-rw-r--r--lib/Sema/SemaTemplateInstantiateDecl.cpp470
-rw-r--r--lib/Sema/SemaTemplateVariadic.cpp167
-rw-r--r--lib/Sema/SemaType.cpp215
-rw-r--r--lib/Sema/TreeTransform.h662
-rw-r--r--lib/Sema/TypeLocBuilder.h4
-rw-r--r--lib/Serialization/ASTCommon.cpp18
-rw-r--r--lib/Serialization/ASTCommon.h13
-rw-r--r--lib/Serialization/ASTReader.cpp1341
-rw-r--r--lib/Serialization/ASTReaderDecl.cpp630
-rw-r--r--lib/Serialization/ASTReaderInternals.h11
-rw-r--r--lib/Serialization/ASTReaderStmt.cpp195
-rw-r--r--lib/Serialization/ASTWriter.cpp883
-rw-r--r--lib/Serialization/ASTWriterDecl.cpp160
-rw-r--r--lib/Serialization/ASTWriterStmt.cpp140
-rw-r--r--lib/Serialization/GlobalModuleIndex.cpp25
-rw-r--r--lib/Serialization/Module.cpp2
-rw-r--r--lib/Serialization/ModuleManager.cpp84
-rw-r--r--lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h4
-rw-r--r--lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp13
-rw-r--r--lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/CStringChecker.cpp7
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp39
-rw-r--r--lib/StaticAnalyzer/Checkers/Checkers.td8
-rw-r--r--lib/StaticAnalyzer/Checkers/ClangSACheckers.h4
-rw-r--r--lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp9
-rw-r--r--lib/StaticAnalyzer/Checkers/InterCheckerAPI.h4
-rw-r--r--lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp8
-rw-r--r--lib/StaticAnalyzer/Checkers/MallocChecker.cpp186
-rw-r--r--lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp19
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp49
-rw-r--r--lib/StaticAnalyzer/Checkers/SelectorExtras.h8
-rw-r--r--lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp14
-rw-r--r--lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp3
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp66
-rw-r--r--lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp26
-rw-r--r--lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp13
-rw-r--r--lib/StaticAnalyzer/Core/AnalysisManager.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/AnalyzerOptions.cpp19
-rw-r--r--lib/StaticAnalyzer/Core/BugReporter.cpp135
-rw-r--r--lib/StaticAnalyzer/Core/BugReporterVisitors.cpp74
-rw-r--r--lib/StaticAnalyzer/Core/CallEvent.cpp2
-rw-r--r--lib/StaticAnalyzer/Core/CoreEngine.cpp67
-rw-r--r--lib/StaticAnalyzer/Core/ExplodedGraph.cpp15
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngine.cpp125
-rw-r--r--lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp2
-rw-r--r--lib/StaticAnalyzer/Core/MemRegion.cpp15
-rw-r--r--lib/StaticAnalyzer/Core/PathDiagnostic.cpp11
-rw-r--r--lib/StaticAnalyzer/Core/PlistDiagnostics.cpp10
-rw-r--r--lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h4
-rw-r--r--lib/StaticAnalyzer/Core/ProgramState.cpp4
-rw-r--r--lib/StaticAnalyzer/Core/RangeConstraintManager.cpp4
-rw-r--r--lib/StaticAnalyzer/Core/RegionStore.cpp11
-rw-r--r--lib/StaticAnalyzer/Core/SimpleConstraintManager.h4
-rw-r--r--lib/StaticAnalyzer/Core/Store.cpp13
-rw-r--r--lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp62
-rw-r--r--lib/StaticAnalyzer/Frontend/CMakeLists.txt3
-rw-r--r--lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp16
-rw-r--r--lib/StaticAnalyzer/Frontend/FrontendActions.cpp19
-rw-r--r--lib/StaticAnalyzer/Frontend/ModelConsumer.cpp42
-rw-r--r--lib/StaticAnalyzer/Frontend/ModelInjector.cpp117
-rw-r--r--lib/StaticAnalyzer/Frontend/ModelInjector.h74
-rw-r--r--lib/Tooling/ArgumentsAdjusters.cpp83
-rw-r--r--lib/Tooling/CMakeLists.txt3
-rw-r--r--lib/Tooling/CommonOptionsParser.cpp63
-rw-r--r--lib/Tooling/CompilationDatabase.cpp38
-rw-r--r--lib/Tooling/Core/CMakeLists.txt10
-rw-r--r--lib/Tooling/Core/Makefile13
-rw-r--r--lib/Tooling/Core/Replacement.cpp289
-rw-r--r--lib/Tooling/JSONCompilationDatabase.cpp18
-rw-r--r--lib/Tooling/Makefile1
-rw-r--r--lib/Tooling/Refactoring.cpp246
-rw-r--r--lib/Tooling/Tooling.cpp162
401 files changed, 42800 insertions, 20633 deletions
diff --git a/lib/ARCMigrate/ARCMT.cpp b/lib/ARCMigrate/ARCMT.cpp
index 8a13b2ee4f1d..dddc886a269c 100644
--- a/lib/ARCMigrate/ARCMT.cpp
+++ b/lib/ARCMigrate/ARCMT.cpp
@@ -446,11 +446,11 @@ public:
ARCMTMacroTrackerAction(std::vector<SourceLocation> &ARCMTMacroLocs)
: ARCMTMacroLocs(ARCMTMacroLocs) { }
- ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) override {
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override {
CI.getPreprocessor().addPPCallbacks(
- new ARCMTMacroTrackerPPCallbacks(ARCMTMacroLocs));
- return new ASTConsumer();
+ llvm::make_unique<ARCMTMacroTrackerPPCallbacks>(ARCMTMacroLocs));
+ return llvm::make_unique<ASTConsumer>();
}
};
@@ -597,11 +597,12 @@ bool MigrationProcess::applyTransform(TransformFn trans,
llvm::raw_svector_ostream vecOS(newText);
buf.write(vecOS);
vecOS.flush();
- llvm::MemoryBuffer *memBuf = llvm::MemoryBuffer::getMemBufferCopy(
- StringRef(newText.data(), newText.size()), newFname);
+ std::unique_ptr<llvm::MemoryBuffer> memBuf(
+ llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(newText.data(), newText.size()), newFname));
SmallString<64> filePath(file->getName());
Unit->getFileManager().FixupRelativePath(filePath);
- Remapper.remap(filePath.str(), memBuf);
+ Remapper.remap(filePath.str(), std::move(memBuf));
}
return false;
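The ARCMT hunks above follow the ownership change in this Clang release: FrontendAction::CreateASTConsumer now returns std::unique_ptr<ASTConsumer>, and Preprocessor::addPPCallbacks takes a std::unique_ptr<PPCallbacks> that it owns. A minimal sketch of the new convention; TrackingAction and TrackingCallbacks are made-up names, not the ARCMT classes:

    #include "clang/AST/ASTConsumer.h"
    #include "clang/Frontend/CompilerInstance.h"
    #include "clang/Frontend/FrontendAction.h"
    #include "clang/Lex/PPCallbacks.h"
    #include "clang/Lex/Preprocessor.h"
    #include "llvm/ADT/STLExtras.h"

    namespace {
    class TrackingCallbacks : public clang::PPCallbacks {};

    class TrackingAction : public clang::ASTFrontendAction {
    protected:
      // New signature: the consumer comes back by unique_ptr, and the
      // preprocessor takes ownership of the callbacks handed to it.
      std::unique_ptr<clang::ASTConsumer>
      CreateASTConsumer(clang::CompilerInstance &CI,
                        llvm::StringRef InFile) override {
        CI.getPreprocessor().addPPCallbacks(
            llvm::make_unique<TrackingCallbacks>());
        return llvm::make_unique<clang::ASTConsumer>();
      }
    };
    } // end anonymous namespace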
diff --git a/lib/ARCMigrate/FileRemapper.cpp b/lib/ARCMigrate/FileRemapper.cpp
index 40e606090064..72a55da5d50b 100644
--- a/lib/ARCMigrate/FileRemapper.cpp
+++ b/lib/ARCMigrate/FileRemapper.cpp
@@ -58,9 +58,7 @@ bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
assert(FromToMappings.empty() &&
"initFromDisk should be called before any remap calls");
std::string infoFile = filePath;
- bool fileExists = false;
- llvm::sys::fs::exists(infoFile, fileExists);
- if (!fileExists)
+ if (!llvm::sys::fs::exists(infoFile))
return false;
std::vector<std::pair<const FileEntry *, const FileEntry *> > pairs;
@@ -122,11 +120,11 @@ bool FileRemapper::flushToDisk(StringRef outputDir, DiagnosticsEngine &Diag) {
bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) {
using namespace llvm::sys;
- std::string errMsg;
+ std::error_code EC;
std::string infoFile = outputPath;
- llvm::raw_fd_ostream infoOut(infoFile.c_str(), errMsg, llvm::sys::fs::F_None);
- if (!errMsg.empty())
- return report(errMsg, Diag);
+ llvm::raw_fd_ostream infoOut(infoFile, EC, llvm::sys::fs::F_None);
+ if (EC)
+ return report(EC.message(), Diag);
for (MappingsTy::iterator
I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
@@ -173,16 +171,14 @@ bool FileRemapper::overwriteOriginal(DiagnosticsEngine &Diag,
I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
const FileEntry *origFE = I->first;
assert(I->second.is<llvm::MemoryBuffer *>());
- bool fileExists = false;
- fs::exists(origFE->getName(), fileExists);
- if (!fileExists)
+ if (!fs::exists(origFE->getName()))
return report(StringRef("File does not exist: ") + origFE->getName(),
Diag);
- std::string errMsg;
- llvm::raw_fd_ostream Out(origFE->getName(), errMsg, llvm::sys::fs::F_None);
- if (!errMsg.empty())
- return report(errMsg, Diag);
+ std::error_code EC;
+ llvm::raw_fd_ostream Out(origFE->getName(), EC, llvm::sys::fs::F_None);
+ if (EC)
+ return report(EC.message(), Diag);
llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
Out.write(mem->getBufferStart(), mem->getBufferSize());
@@ -207,15 +203,17 @@ void FileRemapper::applyMappings(PreprocessorOptions &PPOpts) const {
PPOpts.RetainRemappedFileBuffers = true;
}
-void FileRemapper::remap(StringRef filePath, llvm::MemoryBuffer *memBuf) {
- remap(getOriginalFile(filePath), memBuf);
+void FileRemapper::remap(StringRef filePath,
+ std::unique_ptr<llvm::MemoryBuffer> memBuf) {
+ remap(getOriginalFile(filePath), std::move(memBuf));
}
-void FileRemapper::remap(const FileEntry *file, llvm::MemoryBuffer *memBuf) {
+void FileRemapper::remap(const FileEntry *file,
+ std::unique_ptr<llvm::MemoryBuffer> memBuf) {
assert(file);
Target &targ = FromToMappings[file];
resetTarget(targ);
- targ = memBuf;
+ targ = memBuf.release();
}
void FileRemapper::remap(const FileEntry *file, const FileEntry *newfile) {
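The FileRemapper hunks above show two related migrations: llvm::sys::fs::exists now returns bool directly, and raw_fd_ostream reports open failures through a std::error_code out-parameter instead of a std::string message. A self-contained sketch of the same pattern; writeIfAbsent and the flag choice are illustrative only:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/FileSystem.h"
    #include "llvm/Support/raw_ostream.h"
    #include <system_error>

    bool writeIfAbsent(llvm::StringRef Path, llvm::StringRef Text) {
      if (llvm::sys::fs::exists(Path))   // bool result, no out-parameter
        return false;
      std::error_code EC;
      llvm::raw_fd_ostream Out(Path, EC, llvm::sys::fs::F_None);
      if (EC) {                          // a set error_code means the open failed
        llvm::errs() << "error: " << EC.message() << "\n";
        return false;
      }
      Out << Text;
      return true;
    }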
diff --git a/lib/ARCMigrate/Internals.h b/lib/ARCMigrate/Internals.h
index a65b329c5b03..4f153b1ad2f4 100644
--- a/lib/ARCMigrate/Internals.h
+++ b/lib/ARCMigrate/Internals.h
@@ -73,7 +73,7 @@ public:
bool clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range);
bool clearAllDiagnostics(SourceRange range) {
- return clearDiagnostic(ArrayRef<unsigned>(), range);
+ return clearDiagnostic(None, range);
}
bool clearDiagnostic(unsigned ID1, unsigned ID2, SourceRange range) {
unsigned IDs[] = { ID1, ID2 };
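The one-line change above passes llvm::None where an empty ArrayRef used to be spelled with its element type; None converts implicitly to an empty ArrayRef<T> for any T. A tiny sketch with a hypothetical sum() helper:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/None.h"

    static unsigned sum(llvm::ArrayRef<unsigned> Vals) {
      unsigned Total = 0;
      for (unsigned V : Vals)
        Total += V;
      return Total;
    }

    void demo() {
      unsigned A = sum(llvm::ArrayRef<unsigned>()); // old spelling of "no elements"
      unsigned B = sum(llvm::None);                 // new spelling, element type deduced
      (void)A; (void)B;
    }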
diff --git a/lib/ARCMigrate/ObjCMT.cpp b/lib/ARCMigrate/ObjCMT.cpp
index 1a2055e9c452..52c424c000f8 100644
--- a/lib/ARCMigrate/ObjCMT.cpp
+++ b/lib/ARCMigrate/ObjCMT.cpp
@@ -29,6 +29,7 @@
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/StaticAnalyzer/Checkers/ObjCRetainCount.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/YAMLParser.h"
@@ -81,6 +82,8 @@ class ObjCMigrateASTConsumer : public ASTConsumer {
void inferDesignatedInitializers(ASTContext &Ctx,
const ObjCImplementationDecl *ImplD);
+
+ bool InsertFoundation(ASTContext &Ctx, SourceLocation Loc);
public:
std::string MigrateDir;
@@ -95,10 +98,11 @@ public:
const PPConditionalDirectiveRecord *PPRec;
Preprocessor &PP;
bool IsOutputFile;
+ bool FoundationIncluded;
llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ObjCProtocolDecls;
llvm::SmallVector<const Decl *, 8> CFFunctionIBCandidates;
- llvm::StringMap<char> WhiteListFilenames;
-
+ llvm::StringSet<> WhiteListFilenames;
+
ObjCMigrateASTConsumer(StringRef migrateDir,
unsigned astMigrateActions,
FileRemapper &remapper,
@@ -111,12 +115,12 @@ public:
ASTMigrateActions(astMigrateActions),
NSIntegerTypedefed(nullptr), NSUIntegerTypedefed(nullptr),
Remapper(remapper), FileMgr(fileMgr), PPRec(PPRec), PP(PP),
- IsOutputFile(isOutputFile) {
+ IsOutputFile(isOutputFile),
+ FoundationIncluded(false){
- for (ArrayRef<std::string>::iterator
- I = WhiteList.begin(), E = WhiteList.end(); I != E; ++I) {
- WhiteListFilenames.GetOrCreateValue(*I);
- }
+ // FIXME: StringSet should have insert(iter, iter) to use here.
+ for (const std::string &Val : WhiteList)
+ WhiteListFilenames.insert(Val);
}
protected:
@@ -185,23 +189,17 @@ ObjCMigrateAction::ObjCMigrateAction(FrontendAction *WrappedAction,
MigrateDir = "."; // user current directory if none is given.
}
-ASTConsumer *ObjCMigrateAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+ObjCMigrateAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
PPConditionalDirectiveRecord *
PPRec = new PPConditionalDirectiveRecord(CompInst->getSourceManager());
- CompInst->getPreprocessor().addPPCallbacks(PPRec);
- ASTConsumer *
- WrappedConsumer = WrapperFrontendAction::CreateASTConsumer(CI, InFile);
- ASTConsumer *MTConsumer = new ObjCMigrateASTConsumer(MigrateDir,
- ObjCMigAction,
- Remapper,
- CompInst->getFileManager(),
- PPRec,
- CompInst->getPreprocessor(),
- false,
- ArrayRef<std::string>());
- ASTConsumer *Consumers[] = { MTConsumer, WrappedConsumer };
- return new MultiplexConsumer(Consumers);
+ CI.getPreprocessor().addPPCallbacks(std::unique_ptr<PPCallbacks>(PPRec));
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+ Consumers.push_back(WrapperFrontendAction::CreateASTConsumer(CI, InFile));
+ Consumers.push_back(llvm::make_unique<ObjCMigrateASTConsumer>(
+ MigrateDir, ObjCMigAction, Remapper, CompInst->getFileManager(), PPRec,
+ CompInst->getPreprocessor(), false, None));
+ return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
}
bool ObjCMigrateAction::BeginInvocation(CompilerInstance &CI) {
@@ -213,6 +211,110 @@ bool ObjCMigrateAction::BeginInvocation(CompilerInstance &CI) {
}
namespace {
+ // FIXME. This duplicates one in RewriteObjCFoundationAPI.cpp
+ bool subscriptOperatorNeedsParens(const Expr *FullExpr) {
+ const Expr* Expr = FullExpr->IgnoreImpCasts();
+ if (isa<ArraySubscriptExpr>(Expr) ||
+ isa<CallExpr>(Expr) ||
+ isa<DeclRefExpr>(Expr) ||
+ isa<CXXNamedCastExpr>(Expr) ||
+ isa<CXXConstructExpr>(Expr) ||
+ isa<CXXThisExpr>(Expr) ||
+ isa<CXXTypeidExpr>(Expr) ||
+ isa<CXXUnresolvedConstructExpr>(Expr) ||
+ isa<ObjCMessageExpr>(Expr) ||
+ isa<ObjCPropertyRefExpr>(Expr) ||
+ isa<ObjCProtocolExpr>(Expr) ||
+ isa<MemberExpr>(Expr) ||
+ isa<ObjCIvarRefExpr>(Expr) ||
+ isa<ParenExpr>(FullExpr) ||
+ isa<ParenListExpr>(Expr) ||
+ isa<SizeOfPackExpr>(Expr))
+ return false;
+
+ return true;
+ }
+
+ /// \brief - Rewrite message expression for Objective-C setter and getters into
+ /// property-dot syntax.
+ bool rewriteToPropertyDotSyntax(const ObjCMessageExpr *Msg,
+ Preprocessor &PP,
+ const NSAPI &NS, edit::Commit &commit,
+ const ParentMap *PMap) {
+ if (!Msg || Msg->isImplicit() ||
+ (Msg->getReceiverKind() != ObjCMessageExpr::Instance &&
+ Msg->getReceiverKind() != ObjCMessageExpr::SuperInstance))
+ return false;
+ const ObjCMethodDecl *Method = Msg->getMethodDecl();
+ if (!Method)
+ return false;
+ if (!Method->isPropertyAccessor())
+ return false;
+
+ const ObjCInterfaceDecl *IFace =
+ NS.getASTContext().getObjContainingInterface(Method);
+ if (!IFace)
+ return false;
+
+ const ObjCPropertyDecl *Prop = Method->findPropertyDecl();
+ if (!Prop)
+ return false;
+
+ SourceRange MsgRange = Msg->getSourceRange();
+ bool ReceiverIsSuper =
+ (Msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
+ // for 'super' receiver is nullptr.
+ const Expr *receiver = Msg->getInstanceReceiver();
+ bool NeedsParen =
+ ReceiverIsSuper ? false : subscriptOperatorNeedsParens(receiver);
+ bool IsGetter = (Msg->getNumArgs() == 0);
+ if (IsGetter) {
+ // Find space location range between receiver expression and getter method.
+ SourceLocation BegLoc =
+ ReceiverIsSuper ? Msg->getSuperLoc() : receiver->getLocEnd();
+ BegLoc = PP.getLocForEndOfToken(BegLoc);
+ SourceLocation EndLoc = Msg->getSelectorLoc(0);
+ SourceRange SpaceRange(BegLoc, EndLoc);
+ std::string PropertyDotString;
+ // rewrite getter method expression into: receiver.property or
+ // (receiver).property
+ if (NeedsParen) {
+ commit.insertBefore(receiver->getLocStart(), "(");
+ PropertyDotString = ").";
+ }
+ else
+ PropertyDotString = ".";
+ PropertyDotString += Prop->getName();
+ commit.replace(SpaceRange, PropertyDotString);
+
+ // remove '[' ']'
+ commit.replace(SourceRange(MsgRange.getBegin(), MsgRange.getBegin()), "");
+ commit.replace(SourceRange(MsgRange.getEnd(), MsgRange.getEnd()), "");
+ } else {
+ if (NeedsParen)
+ commit.insertWrap("(", receiver->getSourceRange(), ")");
+ std::string PropertyDotString = ".";
+ PropertyDotString += Prop->getName();
+ PropertyDotString += " =";
+ const Expr*const* Args = Msg->getArgs();
+ const Expr *RHS = Args[0];
+ if (!RHS)
+ return false;
+ SourceLocation BegLoc =
+ ReceiverIsSuper ? Msg->getSuperLoc() : receiver->getLocEnd();
+ BegLoc = PP.getLocForEndOfToken(BegLoc);
+ SourceLocation EndLoc = RHS->getLocStart();
+ EndLoc = EndLoc.getLocWithOffset(-1);
+ SourceRange Range(BegLoc, EndLoc);
+ commit.replace(Range, PropertyDotString);
+ // remove '[' ']'
+ commit.replace(SourceRange(MsgRange.getBegin(), MsgRange.getBegin()), "");
+ commit.replace(SourceRange(MsgRange.getEnd(), MsgRange.getEnd()), "");
+ }
+ return true;
+ }
+
+
class ObjCMigrator : public RecursiveASTVisitor<ObjCMigrator> {
ObjCMigrateASTConsumer &Consumer;
ParentMap &PMap;
@@ -237,6 +339,13 @@ public:
Consumer.Editor->commit(commit);
}
+ if (Consumer.ASTMigrateActions & FrontendOptions::ObjCMT_PropertyDotSyntax) {
+ edit::Commit commit(*Consumer.Editor);
+ rewriteToPropertyDotSyntax(E, Consumer.PP, *Consumer.NSAPIObj,
+ commit, &PMap);
+ Consumer.Editor->commit(commit);
+ }
+
return true;
}
@@ -590,18 +699,32 @@ static bool rewriteToObjCInterfaceDecl(const ObjCInterfaceDecl *IDecl,
return true;
}
+static StringRef GetUnsignedName(StringRef NSIntegerName) {
+ StringRef UnsignedName = llvm::StringSwitch<StringRef>(NSIntegerName)
+ .Case("int8_t", "uint8_t")
+ .Case("int16_t", "uint16_t")
+ .Case("int32_t", "uint32_t")
+ .Case("NSInteger", "NSUInteger")
+ .Case("int64_t", "uint64_t")
+ .Default(NSIntegerName);
+ return UnsignedName;
+}
+
static bool rewriteToNSEnumDecl(const EnumDecl *EnumDcl,
const TypedefDecl *TypedefDcl,
const NSAPI &NS, edit::Commit &commit,
- bool IsNSIntegerType,
+ StringRef NSIntegerName,
bool NSOptions) {
std::string ClassString;
- if (NSOptions)
- ClassString = "typedef NS_OPTIONS(NSUInteger, ";
- else
- ClassString =
- IsNSIntegerType ? "typedef NS_ENUM(NSInteger, "
- : "typedef NS_ENUM(NSUInteger, ";
+ if (NSOptions) {
+ ClassString = "typedef NS_OPTIONS(";
+ ClassString += GetUnsignedName(NSIntegerName);
+ }
+ else {
+ ClassString = "typedef NS_ENUM(";
+ ClassString += NSIntegerName;
+ }
+ ClassString += ", ";
ClassString += TypedefDcl->getIdentifier()->getName();
ClassString += ')';
@@ -640,18 +763,31 @@ static bool rewriteToNSEnumDecl(const EnumDecl *EnumDcl,
return false;
}
-static void rewriteToNSMacroDecl(const EnumDecl *EnumDcl,
+static void rewriteToNSMacroDecl(ASTContext &Ctx,
+ const EnumDecl *EnumDcl,
const TypedefDecl *TypedefDcl,
const NSAPI &NS, edit::Commit &commit,
bool IsNSIntegerType) {
- std::string ClassString =
- IsNSIntegerType ? "NS_ENUM(NSInteger, " : "NS_OPTIONS(NSUInteger, ";
+ QualType EnumUnderlyingT = EnumDcl->getPromotionType();
+ assert(!EnumUnderlyingT.isNull()
+ && "rewriteToNSMacroDecl - underlying enum type is null");
+
+ PrintingPolicy Policy(Ctx.getPrintingPolicy());
+ std::string TypeString = EnumUnderlyingT.getAsString(Policy);
+ std::string ClassString = IsNSIntegerType ? "NS_ENUM(" : "NS_OPTIONS(";
+ ClassString += TypeString;
+ ClassString += ", ";
+
ClassString += TypedefDcl->getIdentifier()->getName();
ClassString += ')';
SourceRange R(EnumDcl->getLocStart(), EnumDcl->getLocStart());
commit.replace(R, ClassString);
- SourceLocation TypedefLoc = TypedefDcl->getLocEnd();
- commit.remove(SourceRange(TypedefLoc, TypedefLoc));
+ // This is to remove spaces between '}' and typedef name.
+ SourceLocation StartTypedefLoc = EnumDcl->getLocEnd();
+ StartTypedefLoc = StartTypedefLoc.getLocWithOffset(+1);
+ SourceLocation EndTypedefLoc = TypedefDcl->getLocEnd();
+
+ commit.remove(SourceRange(StartTypedefLoc, EndTypedefLoc));
}
static bool UseNSOptionsMacro(Preprocessor &PP, ASTContext &Ctx,
@@ -706,11 +842,9 @@ void ObjCMigrateASTConsumer::migrateProtocolConformance(ASTContext &Ctx,
Ctx.CollectInheritedProtocols(IDecl, ExplicitProtocols);
llvm::SmallVector<ObjCProtocolDecl *, 8> PotentialImplicitProtocols;
- for (llvm::SmallPtrSet<ObjCProtocolDecl*, 32>::iterator I =
- ObjCProtocolDecls.begin(),
- E = ObjCProtocolDecls.end(); I != E; ++I)
- if (!ExplicitProtocols.count(*I))
- PotentialImplicitProtocols.push_back(*I);
+ for (ObjCProtocolDecl *ProtDecl : ObjCProtocolDecls)
+ if (!ExplicitProtocols.count(ProtDecl))
+ PotentialImplicitProtocols.push_back(ProtDecl);
if (PotentialImplicitProtocols.empty())
return;
@@ -768,7 +902,7 @@ bool ObjCMigrateASTConsumer::migrateNSEnumDecl(ASTContext &Ctx,
const EnumDecl *EnumDcl,
const TypedefDecl *TypedefDcl) {
if (!EnumDcl->isCompleteDefinition() || EnumDcl->getIdentifier() ||
- EnumDcl->isDeprecated())
+ EnumDcl->isDeprecated() || EnumDcl->getIntegerTypeSourceInfo())
return false;
if (!TypedefDcl) {
if (NSIntegerTypedefed) {
@@ -792,22 +926,17 @@ bool ObjCMigrateASTConsumer::migrateNSEnumDecl(ASTContext &Ctx,
return false;
QualType qt = TypedefDcl->getTypeSourceInfo()->getType();
- bool IsNSIntegerType = NSAPIObj->isObjCNSIntegerType(qt);
- bool IsNSUIntegerType = !IsNSIntegerType && NSAPIObj->isObjCNSUIntegerType(qt);
+ StringRef NSIntegerName = NSAPIObj->GetNSIntegralKind(qt);
- if (!IsNSIntegerType && !IsNSUIntegerType) {
+ if (NSIntegerName.empty()) {
// Also check for typedef enum {...} TD;
if (const EnumType *EnumTy = qt->getAs<EnumType>()) {
if (EnumTy->getDecl() == EnumDcl) {
bool NSOptions = UseNSOptionsMacro(PP, Ctx, EnumDcl);
- if (NSOptions) {
- if (!Ctx.Idents.get("NS_OPTIONS").hasMacroDefinition())
- return false;
- }
- else if (!Ctx.Idents.get("NS_ENUM").hasMacroDefinition())
+ if (!InsertFoundation(Ctx, TypedefDcl->getLocStart()))
return false;
edit::Commit commit(*Editor);
- rewriteToNSMacroDecl(EnumDcl, TypedefDcl, *NSAPIObj, commit, !NSOptions);
+ rewriteToNSMacroDecl(Ctx, EnumDcl, TypedefDcl, *NSAPIObj, commit, !NSOptions);
Editor->commit(commit);
return true;
}
@@ -817,15 +946,11 @@ bool ObjCMigrateASTConsumer::migrateNSEnumDecl(ASTContext &Ctx,
// We may still use NS_OPTIONS based on what we find in the enumertor list.
bool NSOptions = UseNSOptionsMacro(PP, Ctx, EnumDcl);
- // NS_ENUM must be available.
- if (IsNSIntegerType && !Ctx.Idents.get("NS_ENUM").hasMacroDefinition())
- return false;
- // NS_OPTIONS must be available.
- if (IsNSUIntegerType && !Ctx.Idents.get("NS_OPTIONS").hasMacroDefinition())
+ if (!InsertFoundation(Ctx, TypedefDcl->getLocStart()))
return false;
edit::Commit commit(*Editor);
bool Res = rewriteToNSEnumDecl(EnumDcl, TypedefDcl, *NSAPIObj,
- commit, IsNSIntegerType, NSOptions);
+ commit, NSIntegerName, NSOptions);
Editor->commit(commit);
return Res;
}
@@ -1606,6 +1731,22 @@ void ObjCMigrateASTConsumer::inferDesignatedInitializers(
}
}
+bool ObjCMigrateASTConsumer::InsertFoundation(ASTContext &Ctx,
+ SourceLocation Loc) {
+ if (FoundationIncluded)
+ return true;
+ if (Loc.isInvalid())
+ return false;
+ edit::Commit commit(*Editor);
+ if (Ctx.getLangOpts().Modules)
+ commit.insert(Loc, "#ifndef NS_ENUM\n@import Foundation;\n#endif\n");
+ else
+ commit.insert(Loc, "#ifndef NS_ENUM\n#import <Foundation/Foundation.h>\n#endif\n");
+ Editor->commit(commit);
+ FoundationIncluded = true;
+ return true;
+}
+
namespace {
class RewritesReceiver : public edit::EditsReceiver {
@@ -1799,12 +1940,12 @@ void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
}
if (IsOutputFile) {
- std::string Error;
- llvm::raw_fd_ostream OS(MigrateDir.c_str(), Error, llvm::sys::fs::F_None);
- if (!Error.empty()) {
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(MigrateDir, EC, llvm::sys::fs::F_None);
+ if (EC) {
DiagnosticsEngine &Diags = Ctx.getDiagnostics();
Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0"))
- << Error;
+ << EC.message();
return;
}
@@ -1827,11 +1968,12 @@ void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
llvm::raw_svector_ostream vecOS(newText);
buf.write(vecOS);
vecOS.flush();
- llvm::MemoryBuffer *memBuf = llvm::MemoryBuffer::getMemBufferCopy(
- StringRef(newText.data(), newText.size()), file->getName());
+ std::unique_ptr<llvm::MemoryBuffer> memBuf(
+ llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(newText.data(), newText.size()), file->getName()));
SmallString<64> filePath(file->getName());
FileMgr.FixupRelativePath(filePath);
- Remapper.remap(filePath.str(), memBuf);
+ Remapper.remap(filePath.str(), std::move(memBuf));
}
if (IsOutputFile) {
@@ -1865,8 +2007,8 @@ static std::vector<std::string> getWhiteListFilenames(StringRef DirPath) {
return Filenames;
}
-ASTConsumer *MigrateSourceAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+MigrateSourceAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
PPConditionalDirectiveRecord *
PPRec = new PPConditionalDirectiveRecord(CI.getSourceManager());
unsigned ObjCMTAction = CI.getFrontendOpts().ObjCMTAction;
@@ -1880,17 +2022,13 @@ ASTConsumer *MigrateSourceAction::CreateASTConsumer(CompilerInstance &CI,
ObjCMTAction |= FrontendOptions::ObjCMT_Literals |
FrontendOptions::ObjCMT_Subscripting;
}
- CI.getPreprocessor().addPPCallbacks(PPRec);
+ CI.getPreprocessor().addPPCallbacks(std::unique_ptr<PPCallbacks>(PPRec));
std::vector<std::string> WhiteList =
getWhiteListFilenames(CI.getFrontendOpts().ObjCMTWhiteListPath);
- return new ObjCMigrateASTConsumer(CI.getFrontendOpts().OutputFile,
- ObjCMTAction,
- Remapper,
- CI.getFileManager(),
- PPRec,
- CI.getPreprocessor(),
- /*isOutputFile=*/true,
- WhiteList);
+ return llvm::make_unique<ObjCMigrateASTConsumer>(
+ CI.getFrontendOpts().OutputFile, ObjCMTAction, Remapper,
+ CI.getFileManager(), PPRec, CI.getPreprocessor(),
+ /*isOutputFile=*/true, WhiteList);
}
namespace {
@@ -1949,7 +2087,7 @@ public:
return true;
llvm::SourceMgr SM;
- Stream YAMLStream(FileBufOrErr.get().release(), SM);
+ Stream YAMLStream(FileBufOrErr.get()->getMemBufferRef(), SM);
document_iterator I = YAMLStream.begin();
if (I == YAMLStream.end())
return true;
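The GetUnsignedName helper added above is a plain llvm::StringSwitch table. A reduced sketch of the idiom; the mapping below is illustrative, not the migrator's full table:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    // Maps a signed typedef name to its unsigned counterpart, falling back to
    // the input when there is no mapping (mirrors the .Default(...) case above).
    llvm::StringRef getUnsignedSpelling(llvm::StringRef Name) {
      return llvm::StringSwitch<llvm::StringRef>(Name)
          .Case("int32_t", "uint32_t")
          .Case("NSInteger", "NSUInteger")
          .Default(Name);
    }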
diff --git a/lib/ARCMigrate/PlistReporter.cpp b/lib/ARCMigrate/PlistReporter.cpp
index 6b34ef0c2b9e..53398b27af49 100644
--- a/lib/ARCMigrate/PlistReporter.cpp
+++ b/lib/ARCMigrate/PlistReporter.cpp
@@ -56,9 +56,9 @@ void arcmt::writeARCDiagsToPlist(const std::string &outPath,
}
}
- std::string errMsg;
- llvm::raw_fd_ostream o(outPath.c_str(), errMsg, llvm::sys::fs::F_Text);
- if (!errMsg.empty()) {
+ std::error_code EC;
+ llvm::raw_fd_ostream o(outPath, EC, llvm::sys::fs::F_Text);
+ if (EC) {
llvm::errs() << "error: could not create file: " << outPath << '\n';
return;
}
diff --git a/lib/ARCMigrate/TransformActions.cpp b/lib/ARCMigrate/TransformActions.cpp
index 6d178bea0907..9fb2f1d3eea8 100644
--- a/lib/ARCMigrate/TransformActions.cpp
+++ b/lib/ARCMigrate/TransformActions.cpp
@@ -581,8 +581,7 @@ void TransformActionsImpl::applyRewrites(
/// "alive". Since the vast majority of text will be the same, we also unique
/// the strings using a StringMap.
StringRef TransformActionsImpl::getUniqueText(StringRef text) {
- llvm::StringMapEntry<bool> &entry = UniqueText.GetOrCreateValue(text);
- return entry.getKey();
+ return UniqueText.insert(std::make_pair(text, false)).first->first();
}
/// \brief Computes the source location just past the end of the token at
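StringMap::GetOrCreateValue goes away in this LLVM cycle; as the hunk above shows, insert() with an explicit pair replaces it, and first() on the resulting entry hands back the uniqued key. A minimal sketch, assuming a StringMap<bool> used purely as a string uniquer:

    #include "llvm/ADT/StringMap.h"
    #include "llvm/ADT/StringRef.h"
    #include <utility>

    // Returns a StringRef backed by the map's storage, so it stays valid for
    // the map's lifetime; equal inputs share a single stored copy of the text.
    llvm::StringRef uniqueText(llvm::StringMap<bool> &UniqueText,
                               llvm::StringRef Text) {
      return UniqueText.insert(std::make_pair(Text, false)).first->first();
    }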
diff --git a/lib/AST/APValue.cpp b/lib/AST/APValue.cpp
index 0fa0216d9dac..91f1e20d73b6 100644
--- a/lib/AST/APValue.cpp
+++ b/lib/AST/APValue.cpp
@@ -573,7 +573,7 @@ bool APValue::hasLValuePath() const {
ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const {
assert(isLValue() && hasLValuePath() && "Invalid accessor");
const LV &LVal = *((const LV*)(const char*)Data.buffer);
- return ArrayRef<LValuePathEntry>(LVal.getPath(), LVal.PathLength);
+ return llvm::makeArrayRef(LVal.getPath(), LVal.PathLength);
}
unsigned APValue::getLValueCallIndex() const {
@@ -623,7 +623,7 @@ ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const {
assert(isMemberPointer() && "Invalid accessor");
const MemberPointerData &MPD =
*((const MemberPointerData *)(const char *)Data.buffer);
- return ArrayRef<const CXXRecordDecl*>(MPD.getPath(), MPD.PathLength);
+ return llvm::makeArrayRef(MPD.getPath(), MPD.PathLength);
}
void APValue::MakeLValue() {
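Both APValue hunks swap the explicit ArrayRef<T>(pointer, length) constructor for llvm::makeArrayRef, which deduces the element type from the pointer. A one-function sketch; firstN is a made-up name:

    #include "llvm/ADT/ArrayRef.h"
    #include <cstddef>

    llvm::ArrayRef<int> firstN(const int *Data, std::size_t N) {
      // The two forms build the same view; makeArrayRef just avoids spelling
      // the element type at the call site.
      llvm::ArrayRef<int> Explicit(Data, N);
      llvm::ArrayRef<int> Deduced = llvm::makeArrayRef(Data, N);
      (void)Explicit;
      return Deduced;
    }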
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index bccdae91d2eb..6b864d0f0ac2 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -699,9 +699,10 @@ static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
1, // opencl_global
2, // opencl_local
3, // opencl_constant
- 4, // cuda_device
- 5, // cuda_constant
- 6 // cuda_shared
+ 4, // opencl_generic
+ 5, // cuda_device
+ 6, // cuda_constant
+ 7 // cuda_shared
};
return &FakeAddrSpaceMap;
} else {
@@ -722,35 +723,28 @@ static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}
-ASTContext::ASTContext(LangOptions& LOpts, SourceManager &SM,
+ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
IdentifierTable &idents, SelectorTable &sels,
Builtin::Context &builtins)
- : FunctionProtoTypes(this_()),
- TemplateSpecializationTypes(this_()),
- DependentTemplateSpecializationTypes(this_()),
- SubstTemplateTemplateParmPacks(this_()),
- GlobalNestedNameSpecifier(nullptr),
- Int128Decl(nullptr), UInt128Decl(nullptr), Float128StubDecl(nullptr),
- BuiltinVaListDecl(nullptr),
- ObjCIdDecl(nullptr), ObjCSelDecl(nullptr), ObjCClassDecl(nullptr),
- ObjCProtocolClassDecl(nullptr), BOOLDecl(nullptr),
- CFConstantStringTypeDecl(nullptr), ObjCInstanceTypeDecl(nullptr),
- FILEDecl(nullptr),
- jmp_bufDecl(nullptr), sigjmp_bufDecl(nullptr), ucontext_tDecl(nullptr),
- BlockDescriptorType(nullptr), BlockDescriptorExtendedType(nullptr),
- cudaConfigureCallDecl(nullptr),
- NullTypeSourceInfo(QualType()),
- FirstLocalImport(), LastLocalImport(),
- SourceMgr(SM), LangOpts(LOpts),
- AddrSpaceMap(nullptr), Target(nullptr), PrintingPolicy(LOpts),
- Idents(idents), Selectors(sels),
- BuiltinInfo(builtins),
- DeclarationNames(*this),
- ExternalSource(nullptr), Listener(nullptr),
- Comments(SM), CommentsLoaded(false),
- CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
- LastSDM(nullptr, 0)
-{
+ : FunctionProtoTypes(this_()), TemplateSpecializationTypes(this_()),
+ DependentTemplateSpecializationTypes(this_()),
+ SubstTemplateTemplateParmPacks(this_()),
+ GlobalNestedNameSpecifier(nullptr), Int128Decl(nullptr),
+ UInt128Decl(nullptr), Float128StubDecl(nullptr),
+ BuiltinVaListDecl(nullptr), ObjCIdDecl(nullptr), ObjCSelDecl(nullptr),
+ ObjCClassDecl(nullptr), ObjCProtocolClassDecl(nullptr), BOOLDecl(nullptr),
+ CFConstantStringTypeDecl(nullptr), ObjCInstanceTypeDecl(nullptr),
+ FILEDecl(nullptr), jmp_bufDecl(nullptr), sigjmp_bufDecl(nullptr),
+ ucontext_tDecl(nullptr), BlockDescriptorType(nullptr),
+ BlockDescriptorExtendedType(nullptr), cudaConfigureCallDecl(nullptr),
+ FirstLocalImport(), LastLocalImport(),
+ SourceMgr(SM), LangOpts(LOpts),
+ SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFile, SM)),
+ AddrSpaceMap(nullptr), Target(nullptr), PrintingPolicy(LOpts),
+ Idents(idents), Selectors(sels), BuiltinInfo(builtins),
+ DeclarationNames(*this), ExternalSource(nullptr), Listener(nullptr),
+ Comments(SM), CommentsLoaded(false),
+ CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), LastSDM(nullptr, 0) {
TUDecl = TranslationUnitDecl::Create(*this);
}
@@ -1413,9 +1407,9 @@ std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoInChars(const Type *T) const {
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(T))
return getConstantArrayInfoInChars(*this, CAT);
- std::pair<uint64_t, unsigned> Info = getTypeInfo(T);
- return std::make_pair(toCharUnitsFromBits(Info.first),
- toCharUnitsFromBits(Info.second));
+ TypeInfo Info = getTypeInfo(T);
+ return std::make_pair(toCharUnitsFromBits(Info.Width),
+ toCharUnitsFromBits(Info.Align));
}
std::pair<CharUnits, CharUnits>
@@ -1423,14 +1417,23 @@ ASTContext::getTypeInfoInChars(QualType T) const {
return getTypeInfoInChars(T.getTypePtr());
}
-std::pair<uint64_t, unsigned> ASTContext::getTypeInfo(const Type *T) const {
- TypeInfoMap::iterator it = MemoizedTypeInfo.find(T);
- if (it != MemoizedTypeInfo.end())
- return it->second;
+bool ASTContext::isAlignmentRequired(const Type *T) const {
+ return getTypeInfo(T).AlignIsRequired;
+}
+
+bool ASTContext::isAlignmentRequired(QualType T) const {
+ return isAlignmentRequired(T.getTypePtr());
+}
+
+TypeInfo ASTContext::getTypeInfo(const Type *T) const {
+ TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
+ if (I != MemoizedTypeInfo.end())
+ return I->second;
- std::pair<uint64_t, unsigned> Info = getTypeInfoImpl(T);
- MemoizedTypeInfo.insert(std::make_pair(T, Info));
- return Info;
+ // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
+ TypeInfo TI = getTypeInfoImpl(T);
+ MemoizedTypeInfo[T] = TI;
+ return TI;
}
/// getTypeInfoImpl - Return the size of the specified type, in bits. This
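The getTypeInfo hunks above and below replace the old std::pair<uint64_t, unsigned> result with a small TypeInfo struct carrying Width, Align, and the new AlignIsRequired flag. A minimal sketch of a caller written against the new interface; reportTypeInfo is illustrative, and Ctx/T stand for an ASTContext and Type obtained elsewhere:

    #include "clang/AST/ASTContext.h"

    void reportTypeInfo(const clang::ASTContext &Ctx, const clang::Type *T) {
      clang::TypeInfo TI = Ctx.getTypeInfo(T); // was: std::pair<uint64_t, unsigned>
      uint64_t WidthInBits = TI.Width;         // was: Info.first
      unsigned AlignInBits = TI.Align;         // was: Info.second
      bool AttrAlign = TI.AlignIsRequired;     // new: alignment forced by an attribute
      (void)WidthInBits; (void)AlignInBits; (void)AttrAlign;
    }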
@@ -1439,10 +1442,10 @@ std::pair<uint64_t, unsigned> ASTContext::getTypeInfo(const Type *T) const {
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
-std::pair<uint64_t, unsigned>
-ASTContext::getTypeInfoImpl(const Type *T) const {
- uint64_t Width=0;
- unsigned Align=8;
+TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
+ uint64_t Width = 0;
+ unsigned Align = 8;
+ bool AlignIsRequired = false;
switch (T->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
@@ -1471,12 +1474,12 @@ ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::ConstantArray: {
const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
- std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(CAT->getElementType());
+ TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
uint64_t Size = CAT->getSize().getZExtValue();
- assert((Size == 0 || EltInfo.first <= (uint64_t)(-1)/Size) &&
+ assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
"Overflow in array type bit size evaluation");
- Width = EltInfo.first*Size;
- Align = EltInfo.second;
+ Width = EltInfo.Width * Size;
+ Align = EltInfo.Align;
if (!getTargetInfo().getCXXABI().isMicrosoft() ||
getTargetInfo().getPointerWidth(0) == 64)
Width = llvm::RoundUpToAlignment(Width, Align);
@@ -1485,8 +1488,8 @@ ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::ExtVector:
case Type::Vector: {
const VectorType *VT = cast<VectorType>(T);
- std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(VT->getElementType());
- Width = EltInfo.first*VT->getNumElements();
+ TypeInfo EltInfo = getTypeInfo(VT->getElementType());
+ Width = EltInfo.Width * VT->getNumElements();
Align = Width;
// If the alignment is not a power of 2, round up to the next power of 2.
// This happens for non-power-of-2 length vectors.
@@ -1638,10 +1641,9 @@ ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::Complex: {
// Complex types have the same alignment as their elements, but twice the
// size.
- std::pair<uint64_t, unsigned> EltInfo =
- getTypeInfo(cast<ComplexType>(T)->getElementType());
- Width = EltInfo.first*2;
- Align = EltInfo.second;
+ TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
+ Width = EltInfo.Width * 2;
+ Align = EltInfo.Align;
break;
}
case Type::ObjCObject:
@@ -1692,16 +1694,18 @@ ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::Typedef: {
const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
- std::pair<uint64_t, unsigned> Info
- = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
+ TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
// If the typedef has an aligned attribute on it, it overrides any computed
// alignment we have. This violates the GCC documentation (which says that
// attribute(aligned) can only round up) but matches its implementation.
- if (unsigned AttrAlign = Typedef->getMaxAlignment())
+ if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
Align = AttrAlign;
- else
- Align = Info.second;
- Width = Info.first;
+ AlignIsRequired = true;
+ } else {
+ Align = Info.Align;
+ AlignIsRequired = Info.AlignIsRequired;
+ }
+ Width = Info.Width;
break;
}
@@ -1714,10 +1718,9 @@ ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::Atomic: {
// Start with the base type information.
- std::pair<uint64_t, unsigned> Info
- = getTypeInfo(cast<AtomicType>(T)->getValueType());
- Width = Info.first;
- Align = Info.second;
+ TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
+ Width = Info.Width;
+ Align = Info.Align;
// If the size of the type doesn't exceed the platform's max
// atomic promotion width, make the size and alignment more
@@ -1735,7 +1738,7 @@ ASTContext::getTypeInfoImpl(const Type *T) const {
}
assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
- return std::make_pair(Width, Align);
+ return TypeInfo(Width, Align, AlignIsRequired);
}
/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
@@ -1771,13 +1774,12 @@ CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
/// alignment in cases where it is beneficial for performance to overalign
/// a data type.
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
- unsigned ABIAlign = getTypeAlign(T);
+ TypeInfo TI = getTypeInfo(T);
+ unsigned ABIAlign = TI.Align;
if (Target->getTriple().getArch() == llvm::Triple::xcore)
return ABIAlign; // Never overalign on XCore.
- const TypedefType *TT = T->getAs<TypedefType>();
-
// Double and long long should be naturally aligned if possible.
T = T->getBaseElementTypeUnsafe();
if (const ComplexType *CT = T->getAs<ComplexType>())
@@ -1787,7 +1789,7 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
T->isSpecificBuiltinType(BuiltinType::ULongLong))
// Don't increase the alignment if an alignment attribute was specified on a
// typedef declaration.
- if (!TT || !TT->getDecl()->getMaxAlignment())
+ if (!TI.AlignIsRequired)
return std::max(ABIAlign, (unsigned)getTypeSize(T));
return ABIAlign;
@@ -2110,6 +2112,62 @@ void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
L->DeducedReturnType(FD, ResultType);
}
+/// Get a function type and produce the equivalent function type with the
+/// specified exception specification. Type sugar that can be present on a
+/// declaration of a function with an exception specification is permitted
+/// and preserved. Other type sugar (for instance, typedefs) is not.
+static QualType getFunctionTypeWithExceptionSpec(
+ ASTContext &Context, QualType Orig,
+ const FunctionProtoType::ExceptionSpecInfo &ESI) {
+ // Might have some parens.
+ if (auto *PT = dyn_cast<ParenType>(Orig))
+ return Context.getParenType(
+ getFunctionTypeWithExceptionSpec(Context, PT->getInnerType(), ESI));
+
+ // Might have a calling-convention attribute.
+ if (auto *AT = dyn_cast<AttributedType>(Orig))
+ return Context.getAttributedType(
+ AT->getAttrKind(),
+ getFunctionTypeWithExceptionSpec(Context, AT->getModifiedType(), ESI),
+ getFunctionTypeWithExceptionSpec(Context, AT->getEquivalentType(),
+ ESI));
+
+ // Anything else must be a function type. Rebuild it with the new exception
+ // specification.
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(Orig);
+ return Context.getFunctionType(
+ Proto->getReturnType(), Proto->getParamTypes(),
+ Proto->getExtProtoInfo().withExceptionSpec(ESI));
+}
+
+void ASTContext::adjustExceptionSpec(
+ FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
+ bool AsWritten) {
+ // Update the type.
+ QualType Updated =
+ getFunctionTypeWithExceptionSpec(*this, FD->getType(), ESI);
+ FD->setType(Updated);
+
+ if (!AsWritten)
+ return;
+
+ // Update the type in the type source information too.
+ if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
+ // If the type and the type-as-written differ, we may need to update
+ // the type-as-written too.
+ if (TSInfo->getType() != FD->getType())
+ Updated = getFunctionTypeWithExceptionSpec(*this, TSInfo->getType(), ESI);
+
+ // FIXME: When we get proper type location information for exceptions,
+ // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
+ // up the TypeSourceInfo;
+ assert(TypeLoc::getFullDataSizeForType(Updated) ==
+ TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
+ "TypeLoc size mismatch from updating exception specification");
+ TSInfo->overrideType(Updated);
+ }
+}
+
/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
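adjustExceptionSpec above rebuilds a function type around a new FunctionProtoType::ExceptionSpecInfo via ExtProtoInfo::withExceptionSpec. A condensed sketch of that core step; makeNoexceptVariant is a made-up wrapper, and FPT stands for a FunctionProtoType obtained elsewhere:

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Type.h"

    clang::QualType makeNoexceptVariant(clang::ASTContext &Ctx,
                                        const clang::FunctionProtoType *FPT) {
      // Build a "noexcept" exception specification and reattach it while
      // keeping the return and parameter types unchanged.
      clang::FunctionProtoType::ExceptionSpecInfo ESI(clang::EST_BasicNoexcept);
      return Ctx.getFunctionType(FPT->getReturnType(), FPT->getParamTypes(),
                                 FPT->getExtProtoInfo().withExceptionSpec(ESI));
    }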
@@ -2840,7 +2898,7 @@ ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
// Determine whether the type being created is already canonical or not.
bool isCanonical =
- EPI.ExceptionSpecType == EST_None && isCanonicalResultType(ResultTy) &&
+ EPI.ExceptionSpec.Type == EST_None && isCanonicalResultType(ResultTy) &&
!EPI.HasTrailingReturn;
for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
if (!ArgArray[i].isCanonicalAsParam())
@@ -2857,8 +2915,7 @@ ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
CanonicalEPI.HasTrailingReturn = false;
- CanonicalEPI.ExceptionSpecType = EST_None;
- CanonicalEPI.NumExceptions = 0;
+ CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
// Result types do not have ARC lifetime qualifiers.
QualType CanResultTy = getCanonicalType(ResultTy);
@@ -2886,13 +2943,13 @@ ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
// specification.
size_t Size = sizeof(FunctionProtoType) +
NumArgs * sizeof(QualType);
- if (EPI.ExceptionSpecType == EST_Dynamic) {
- Size += EPI.NumExceptions * sizeof(QualType);
- } else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) {
+ if (EPI.ExceptionSpec.Type == EST_Dynamic) {
+ Size += EPI.ExceptionSpec.Exceptions.size() * sizeof(QualType);
+ } else if (EPI.ExceptionSpec.Type == EST_ComputedNoexcept) {
Size += sizeof(Expr*);
- } else if (EPI.ExceptionSpecType == EST_Uninstantiated) {
+ } else if (EPI.ExceptionSpec.Type == EST_Uninstantiated) {
Size += 2 * sizeof(FunctionDecl*);
- } else if (EPI.ExceptionSpecType == EST_Unevaluated) {
+ } else if (EPI.ExceptionSpec.Type == EST_Unevaluated) {
Size += sizeof(FunctionDecl*);
}
if (EPI.ConsumedParameters)
@@ -4099,7 +4156,7 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
case TemplateArgument::Declaration: {
ValueDecl *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
- return TemplateArgument(D, Arg.isDeclForReferenceParam());
+ return TemplateArgument(D, Arg.getParamTypeForDecl());
}
case TemplateArgument::NullPtr:
@@ -4188,7 +4245,8 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
}
case NestedNameSpecifier::Global:
- // The global specifier is canonical and unique.
+ case NestedNameSpecifier::Super:
+ // The global specifier and __super specifer are canonical and unique.
return NNS;
}
@@ -4414,7 +4472,11 @@ unsigned ASTContext::getIntegerRank(const Type *T) const {
QualType ASTContext::isPromotableBitField(Expr *E) const {
if (E->isTypeDependent() || E->isValueDependent())
return QualType();
-
+
+ // FIXME: We should not do this unless E->refersToBitField() is true. This
+ // matters in C where getSourceBitField() will find bit-fields for various
+ // cases where the source expression is not a bit-field designator.
+
FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
if (!Field)
return QualType();
@@ -4423,9 +4485,20 @@ QualType ASTContext::isPromotableBitField(Expr *E) const {
uint64_t BitWidth = Field->getBitWidthValue(*this);
uint64_t IntSize = getTypeSize(IntTy);
- // GCC extension compatibility: if the bit-field size is less than or equal
- // to the size of int, it gets promoted no matter what its type is.
- // For instance, unsigned long bf : 4 gets promoted to signed int.
+ // C++ [conv.prom]p5:
+ // A prvalue for an integral bit-field can be converted to a prvalue of type
+ // int if int can represent all the values of the bit-field; otherwise, it
+ // can be converted to unsigned int if unsigned int can represent all the
+ // values of the bit-field. If the bit-field is larger yet, no integral
+ // promotion applies to it.
+ // C11 6.3.1.1/2:
+ // [For a bit-field of type _Bool, int, signed int, or unsigned int:]
+ // If an int can represent all values of the original type (as restricted by
+ // the width, for a bit-field), the value is converted to an int; otherwise,
+ // it is converted to an unsigned int.
+ //
+ // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
+ // We perform that promotion here to match GCC and C++.
if (BitWidth < IntSize)
return IntTy;
@@ -4433,9 +4506,10 @@ QualType ASTContext::isPromotableBitField(Expr *E) const {
return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
// Types bigger than int are not subject to promotions, and therefore act
- // like the base type.
- // FIXME: This doesn't quite match what gcc does, but what gcc does here
- // is ridiculous.
+ // like the base type. GCC has some weird bugs in this area that we
+ // deliberately do not follow (GCC follows a pre-standard resolution to
+ // C's DR315 which treats bit-width as being part of the type, and this leaks
+ // into their semantics in some cases).
return QualType();
}
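[editor's note] As a worked illustration of the promotion rules quoted above (hedged: the snippet is illustrative and not part of the patch), the GCC-compatible behaviour promotes a narrow bit-field to int even when its declared type is wider than int:

    // 'bf' occupies 4 bits, which fits in int, so it promotes to (signed) int
    // despite being declared unsigned long; in strict C this is the extension
    // the FIXME above refers to.
    struct S { unsigned long bf : 4; };
    int promoted(struct S s) {
      return s.bf + 1;   // operands undergo integral promotion; result is int
    }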
@@ -5090,13 +5164,15 @@ void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
}
void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
- const FieldDecl *Field) const {
+ const FieldDecl *Field,
+ QualType *NotEncodedT) const {
// We follow the behavior of gcc, expanding structures which are
// directly pointed to, and expanding embedded structures. Note that
// these rules are sufficient to prevent recursive encoding of the
// same type.
getObjCEncodingForTypeImpl(T, S, true, true, Field,
- true /* outermost type */);
+ true /* outermost type */, false, false,
+ false, false, false, NotEncodedT);
}
void ASTContext::getObjCEncodingForPropertyType(QualType T,
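[editor's note] The NotEncodedT out-parameter threaded through these encoding routines lets a caller learn which type defeated the encoder (member pointers and vector types, per the cases below). A hedged usage sketch against the signature added above; the caller-side helper is hypothetical:

    // Sketch only: encode a type and report when part of it had no @encoding.
    #include "clang/AST/ASTContext.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace clang;

    std::string encodeOrWarn(ASTContext &Ctx, QualType T) {
      std::string S;
      QualType NotEncoded;                                   // set on failure
      Ctx.getObjCEncodingForType(T, S, /*Field=*/nullptr, &NotEncoded);
      if (!NotEncoded.isNull())
        llvm::errs() << "no @encoding for: " << NotEncoded.getAsString() << "\n";
      return S;
    }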
@@ -5222,7 +5298,8 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
bool StructField,
bool EncodeBlockParameters,
bool EncodeClassNames,
- bool EncodePointerToObjCTypedef) const {
+ bool EncodePointerToObjCTypedef,
+ QualType *NotEncodedT) const {
CanQualType CT = getCanonicalType(T);
switch (CT->getTypeClass()) {
case Type::Builtin:
@@ -5238,16 +5315,14 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
case Type::Complex: {
const ComplexType *CT = T->castAs<ComplexType>();
S += 'j';
- getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, nullptr,
- false, false);
+ getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, nullptr);
return;
}
case Type::Atomic: {
const AtomicType *AT = T->castAs<AtomicType>();
S += 'A';
- getObjCEncodingForTypeImpl(AT->getValueType(), S, false, false, nullptr,
- false, false);
+ getObjCEncodingForTypeImpl(AT->getValueType(), S, false, false, nullptr);
return;
}
@@ -5318,7 +5393,8 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
getLegacyIntegralTypeEncoding(PointeeTy);
getObjCEncodingForTypeImpl(PointeeTy, S, false, ExpandPointedToStructures,
- nullptr);
+ nullptr, false, false, false, false, false, false,
+ NotEncodedT);
return;
}
@@ -5346,7 +5422,9 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
}
getObjCEncodingForTypeImpl(AT->getElementType(), S,
- false, ExpandStructures, FD);
+ false, ExpandStructures, FD,
+ false, false, false, false, false, false,
+ NotEncodedT);
S += ']';
}
return;
@@ -5378,7 +5456,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
if (ExpandStructures) {
S += '=';
if (!RDecl->isUnion()) {
- getObjCEncodingForStructureImpl(RDecl, S, FD);
+ getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT);
} else {
for (const auto *Field : RDecl->fields()) {
if (FD) {
@@ -5397,7 +5475,8 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
getObjCEncodingForTypeImpl(qt, S, false, true,
FD, /*OutermostType*/false,
/*EncodingProperty*/false,
- /*StructField*/true);
+ /*StructField*/true,
+ false, false, false, NotEncodedT);
}
}
}
@@ -5417,7 +5496,8 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
getObjCEncodingForTypeImpl(
FT->getReturnType(), S, ExpandPointedToStructures, ExpandStructures,
FD, false /* OutermostType */, EncodingProperty,
- false /* StructField */, EncodeBlockParameters, EncodeClassNames);
+ false /* StructField */, EncodeBlockParameters, EncodeClassNames, false,
+ NotEncodedT);
// Block self
S += "@?";
// Block parameters
@@ -5426,7 +5506,8 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
getObjCEncodingForTypeImpl(
I, S, ExpandPointedToStructures, ExpandStructures, FD,
false /* OutermostType */, EncodingProperty,
- false /* StructField */, EncodeBlockParameters, EncodeClassNames);
+ false /* StructField */, EncodeBlockParameters, EncodeClassNames,
+ false, NotEncodedT);
}
S += '>';
}
@@ -5468,7 +5549,8 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
else
getObjCEncodingForTypeImpl(Field->getType(), S, false, true, FD,
false, false, false, false, false,
- EncodePointerToObjCTypedef);
+ EncodePointerToObjCTypedef,
+ NotEncodedT);
}
S += '}';
return;
@@ -5555,19 +5637,21 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
// gcc just blithely ignores member pointers.
// FIXME: we should do better than that. 'M' is available.
case Type::MemberPointer:
- return;
-
+ // This matches gcc's encoding, even though technically it is insufficient.
+ //FIXME. We should do a better job than gcc.
case Type::Vector:
case Type::ExtVector:
- // This matches gcc's encoding, even though technically it is
- // insufficient.
- // FIXME. We should do a better job than gcc.
- return;
-
+ // Until we have a coherent encoding of these three types, issue warning.
+ { if (NotEncodedT)
+ *NotEncodedT = T;
+ return;
+ }
+
+ // We could see an undeduced auto type here during error recovery.
+ // Just ignore it.
case Type::Auto:
- // We could see an undeduced auto type here during error recovery.
- // Just ignore it.
return;
+
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
@@ -5586,7 +5670,8 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
std::string &S,
const FieldDecl *FD,
- bool includeVBases) const {
+ bool includeVBases,
+ QualType *NotEncodedT) const {
assert(RDecl && "Expected non-null RecordDecl");
assert(!RDecl->isUnion() && "Should not be called for unions");
if (!RDecl->getDefinition())
@@ -5610,12 +5695,11 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
}
unsigned i = 0;
- for (RecordDecl::field_iterator Field = RDecl->field_begin(),
- FieldEnd = RDecl->field_end();
- Field != FieldEnd; ++Field, ++i) {
+ for (auto *Field : RDecl->fields()) {
uint64_t offs = layout.getFieldOffset(i);
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
- std::make_pair(offs, *Field));
+ std::make_pair(offs, Field));
+ ++i;
}
if (CXXRec && includeVBases) {
@@ -5691,7 +5775,8 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
// in the initial structure. Note that this differs from gcc which
// expands virtual bases each time one is encountered in the hierarchy,
// making the encoding type bigger than it really is.
- getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false);
+ getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false,
+ NotEncodedT);
assert(!base->isEmpty());
#ifndef NDEBUG
CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
@@ -5715,7 +5800,8 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
getObjCEncodingForTypeImpl(qt, S, false, true, FD,
/*OutermostType*/false,
/*EncodingProperty*/false,
- /*StructField*/true);
+ /*StructField*/true,
+ false, false, false, NotEncodedT);
#ifndef NDEBUG
CurOffs += getTypeSize(field->getType());
#endif
@@ -6654,11 +6740,9 @@ void getIntersectionOfProtocols(ASTContext &Context,
llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSInheritedProtocols;
Context.CollectInheritedProtocols(RHS->getInterface(),
RHSInheritedProtocols);
- for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
- RHSInheritedProtocols.begin(),
- E = RHSInheritedProtocols.end(); I != E; ++I)
- if (InheritedProtocolSet.count((*I)))
- IntersectionOfProtocols.push_back((*I));
+ for (ObjCProtocolDecl *ProtDecl : RHSInheritedProtocols)
+ if (InheritedProtocolSet.count(ProtDecl))
+ IntersectionOfProtocols.push_back(ProtDecl);
}
}
@@ -6708,58 +6792,40 @@ bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
if (LHS->getNumProtocols() == 0)
return true;
- // Okay, we know the LHS has protocol qualifiers. If the RHS doesn't,
- // more detailed analysis is required.
- if (RHS->getNumProtocols() == 0) {
- // OK, if LHS is a superclass of RHS *and*
- // this superclass is assignment compatible with LHS.
- // false otherwise.
- bool IsSuperClass =
- LHS->getInterface()->isSuperClassOf(RHS->getInterface());
- if (IsSuperClass) {
- // OK if conversion of LHS to SuperClass results in narrowing of types
- // ; i.e., SuperClass may implement at least one of the protocols
- // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
- // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
- llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
- CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
- // If super class has no protocols, it is not a match.
- if (SuperClassInheritedProtocols.empty())
- return false;
+ // Okay, we know the LHS has protocol qualifiers. But RHS may or may not.
+ // More detailed analysis is required.
+ // OK if LHS is the same as, or a superclass of, RHS *and* that class
+ // (RHS itself or its superclass) is assignment compatible with LHS.
+ bool IsSuperClass =
+ LHS->getInterface()->isSuperClassOf(RHS->getInterface());
+ if (IsSuperClass) {
+ // OK if conversion of LHS to SuperClass results in narrowing of types
+ // ; i.e., SuperClass may implement at least one of the protocols
+ // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
+ // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
+ CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
+ // Also, if RHS has explicit qualifiers, include them for comparing with LHS's
+ // qualifiers.
+ for (auto *RHSPI : RHS->quals())
+ SuperClassInheritedProtocols.insert(RHSPI->getCanonicalDecl());
+ // If there are no protocols associated with RHS, it is not a match.
+ if (SuperClassInheritedProtocols.empty())
+ return false;
- for (const auto *LHSProto : LHS->quals()) {
- bool SuperImplementsProtocol = false;
- for (auto *SuperClassProto : SuperClassInheritedProtocols) {
- if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
- SuperImplementsProtocol = true;
- break;
- }
+ for (const auto *LHSProto : LHS->quals()) {
+ bool SuperImplementsProtocol = false;
+ for (auto *SuperClassProto : SuperClassInheritedProtocols)
+ if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
+ SuperImplementsProtocol = true;
+ break;
}
- if (!SuperImplementsProtocol)
- return false;
- }
- return true;
- }
- return false;
- }
-
- for (const auto *LHSPI : LHS->quals()) {
- bool RHSImplementsProtocol = false;
-
- // If the RHS doesn't implement the protocol on the left, the types
- // are incompatible.
- for (auto *RHSPI : RHS->quals()) {
- if (RHSPI->lookupProtocolNamed(LHSPI->getIdentifier())) {
- RHSImplementsProtocol = true;
- break;
- }
+ if (!SuperImplementsProtocol)
+ return false;
}
- // FIXME: For better diagnostics, consider passing back the protocol name.
- if (!RHSImplementsProtocol)
- return false;
+ return true;
}
- // The RHS implements all protocols listed on the LHS.
- return true;
+ return false;
}
bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
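[editor's note] In essence the rewritten check asks one question for both the same-class and superclass cases: is every protocol listed on the LHS covered by what the RHS provides (its inherited protocols plus its explicit qualifiers)? A hedged, self-contained sketch of that subset test, with protocol names standing in for ObjCProtocolDecls and the inherited-protocol lookup elided:

    #include <set>
    #include <string>

    // Simplified model of the narrowing test above; illustrative only.
    bool lhsProtocolsCovered(const std::set<std::string> &LHSQuals,
                             const std::set<std::string> &RHSProvided) {
      if (RHSProvided.empty())
        return false;                  // RHS brings no protocols: not a match
      for (const std::string &Proto : LHSQuals)
        if (!RHSProvided.count(Proto))
          return false;                // some LHS protocol is unsatisfied
      return true;                     // e.g. SuperObj<P1> = rhs<P1,P2> is OK
    }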
@@ -7895,7 +7961,9 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
// We never need to emit an uninstantiated function template.
if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
return false;
- } else
+ } else if (isa<OMPThreadPrivateDecl>(D))
+ return true;
+ else
return false;
// If this is a member of a class template, we do not need to emit it.
@@ -8236,7 +8304,7 @@ namespace {
} // end namespace
-ASTContext::ParentVector
+ArrayRef<ast_type_traits::DynTypedNode>
ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) {
assert(Node.getMemoizationData() &&
"Invariant broken: only nodes that support memoization may be "
@@ -8249,13 +8317,12 @@ ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) {
}
ParentMap::const_iterator I = AllParents->find(Node.getMemoizationData());
if (I == AllParents->end()) {
- return ParentVector();
+ return None;
}
- if (I->second.is<ast_type_traits::DynTypedNode *>()) {
- return ParentVector(1, *I->second.get<ast_type_traits::DynTypedNode *>());
+ if (auto *N = I->second.dyn_cast<ast_type_traits::DynTypedNode *>()) {
+ return llvm::makeArrayRef(N, 1);
}
- const auto &Parents = *I->second.get<ParentVector *>();
- return ParentVector(Parents.begin(), Parents.end());
+ return *I->second.get<ParentVector *>();
}
bool
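[editor's note] getParents now hands back a view of the memoized parent map instead of copying into a fresh ParentVector; the map's value is a PointerUnion of either a single node or a vector of nodes. A hedged, simplified sketch of that storage/return pattern (the Node/NodeVec names are illustrative, not clang types):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/PointerUnion.h"
    #include <vector>

    struct Node { int Value; };
    using NodeVec = std::vector<Node>;
    using OneOrMany = llvm::PointerUnion<Node *, NodeVec *>;

    llvm::ArrayRef<Node> parentsView(OneOrMany Stored) {
      if (auto *Single = Stored.dyn_cast<Node *>())
        return llvm::makeArrayRef(Single, 1);   // one parent, no copy
      return *Stored.get<NodeVec *>();          // many parents, still no copy
    }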
diff --git a/lib/AST/ASTDiagnostic.cpp b/lib/AST/ASTDiagnostic.cpp
index 8c8b1dff0cbf..3212359db183 100644
--- a/lib/AST/ASTDiagnostic.cpp
+++ b/lib/AST/ASTDiagnostic.cpp
@@ -342,26 +342,22 @@ void clang::FormatASTNodeDiagnosticArgument(
assert(DC && "Should never have a null declaration context");
NeedQuotes = false;
+ // FIXME: Get the strings for DeclContext from some localized place
if (DC->isTranslationUnit()) {
- // FIXME: Get these strings from some localized place
if (Context.getLangOpts().CPlusPlus)
OS << "the global namespace";
else
OS << "the global scope";
+ } else if (DC->isClosure()) {
+ OS << "block literal";
+ } else if (isLambdaCallOperator(DC)) {
+ OS << "lambda expression";
} else if (TypeDecl *Type = dyn_cast<TypeDecl>(DC)) {
OS << ConvertTypeToDiagnosticString(Context,
Context.getTypeDeclType(Type),
PrevArgs, QualTypeVals);
} else {
- // FIXME: Get these strings from some localized place
- if (isa<BlockDecl>(DC)) {
- OS << "block literal";
- break;
- }
- if (isLambdaCallOperator(DC)) {
- OS << "lambda expression";
- break;
- }
+ assert(isa<NamedDecl>(DC) && "Expected a NamedDecl");
NamedDecl *ND = cast<NamedDecl>(DC);
if (isa<NamespaceDecl>(ND))
OS << "namespace ";
@@ -877,6 +873,194 @@ class TemplateDiff {
return Ty->getAs<TemplateSpecializationType>();
}
+ /// DiffTypes - Fills a DiffNode with information about a type difference.
+ void DiffTypes(const TSTiterator &FromIter, const TSTiterator &ToIter,
+ TemplateTypeParmDecl *FromDefaultTypeDecl,
+ TemplateTypeParmDecl *ToDefaultTypeDecl) {
+ QualType FromType = GetType(FromIter, FromDefaultTypeDecl);
+ QualType ToType = GetType(ToIter, ToDefaultTypeDecl);
+
+ Tree.SetNode(FromType, ToType);
+ Tree.SetDefault(FromIter.isEnd() && !FromType.isNull(),
+ ToIter.isEnd() && !ToType.isNull());
+ Tree.SetKind(DiffTree::Type);
+ if (FromType.isNull() || ToType.isNull())
+ return;
+
+ if (Context.hasSameType(FromType, ToType)) {
+ Tree.SetSame(true);
+ return;
+ }
+
+ const TemplateSpecializationType *FromArgTST =
+ GetTemplateSpecializationType(Context, FromType);
+ if (!FromArgTST)
+ return;
+
+ const TemplateSpecializationType *ToArgTST =
+ GetTemplateSpecializationType(Context, ToType);
+ if (!ToArgTST)
+ return;
+
+ if (!hasSameTemplate(FromArgTST, ToArgTST))
+ return;
+
+ Qualifiers FromQual = FromType.getQualifiers(),
+ ToQual = ToType.getQualifiers();
+ FromQual -= QualType(FromArgTST, 0).getQualifiers();
+ ToQual -= QualType(ToArgTST, 0).getQualifiers();
+ Tree.SetNode(FromArgTST->getTemplateName().getAsTemplateDecl(),
+ ToArgTST->getTemplateName().getAsTemplateDecl());
+ Tree.SetNode(FromQual, ToQual);
+ Tree.SetKind(DiffTree::Template);
+ DiffTemplate(FromArgTST, ToArgTST);
+ }
+
+ /// DiffTemplateTemplates - Fills a DiffNode with information about a
+ /// template template difference.
+ void DiffTemplateTemplates(const TSTiterator &FromIter,
+ const TSTiterator &ToIter,
+ TemplateTemplateParmDecl *FromDefaultTemplateDecl,
+ TemplateTemplateParmDecl *ToDefaultTemplateDecl) {
+ TemplateDecl *FromDecl = GetTemplateDecl(FromIter, FromDefaultTemplateDecl);
+ TemplateDecl *ToDecl = GetTemplateDecl(ToIter, ToDefaultTemplateDecl);
+ Tree.SetNode(FromDecl, ToDecl);
+ Tree.SetSame(FromDecl && ToDecl &&
+ FromDecl->getCanonicalDecl() == ToDecl->getCanonicalDecl());
+ Tree.SetDefault(FromIter.isEnd() && FromDecl, ToIter.isEnd() && ToDecl);
+ Tree.SetKind(DiffTree::TemplateTemplate);
+ }
+
+ /// InitializeNonTypeDiffVariables - Helper function for DiffNonTypes
+ static void InitializeNonTypeDiffVariables(
+ ASTContext &Context, const TSTiterator &Iter,
+ NonTypeTemplateParmDecl *Default, bool &HasInt, bool &HasValueDecl,
+ bool &IsNullPtr, Expr *&E, llvm::APSInt &Value, ValueDecl *&VD) {
+ HasInt = !Iter.isEnd() && Iter->getKind() == TemplateArgument::Integral;
+
+ HasValueDecl =
+ !Iter.isEnd() && Iter->getKind() == TemplateArgument::Declaration;
+
+ IsNullPtr = !Iter.isEnd() && Iter->getKind() == TemplateArgument::NullPtr;
+
+ if (HasInt)
+ Value = Iter->getAsIntegral();
+ else if (HasValueDecl)
+ VD = Iter->getAsDecl();
+ else if (!IsNullPtr)
+ E = GetExpr(Iter, Default);
+
+ if (E && Default->getType()->isPointerType())
+ IsNullPtr = CheckForNullPtr(Context, E);
+ }
+
+ /// NeedsAddressOf - Helper function for DiffNonTypes. Returns true if the
+ /// ValueDecl needs a '&' when printed.
+ static bool NeedsAddressOf(ValueDecl *VD, Expr *E,
+ NonTypeTemplateParmDecl *Default) {
+ if (!VD)
+ return false;
+
+ if (E) {
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
+ if (UO->getOpcode() == UO_AddrOf) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ if (!Default->getType()->isReferenceType()) {
+ return true;
+ }
+
+ return false;
+ }
+
+ /// DiffNonTypes - Handles any template parameters not handled by DiffTypes
+ /// or DiffTemplateTemplates, such as integer and declaration parameters.
+ void DiffNonTypes(const TSTiterator &FromIter, const TSTiterator &ToIter,
+ NonTypeTemplateParmDecl *FromDefaultNonTypeDecl,
+ NonTypeTemplateParmDecl *ToDefaultNonTypeDecl) {
+ Expr *FromExpr = nullptr, *ToExpr = nullptr;
+ llvm::APSInt FromInt, ToInt;
+ ValueDecl *FromValueDecl = nullptr, *ToValueDecl = nullptr;
+ bool HasFromInt = false, HasToInt = false, HasFromValueDecl = false,
+ HasToValueDecl = false, FromNullPtr = false, ToNullPtr = false;
+ InitializeNonTypeDiffVariables(Context, FromIter, FromDefaultNonTypeDecl,
+ HasFromInt, HasFromValueDecl, FromNullPtr,
+ FromExpr, FromInt, FromValueDecl);
+ InitializeNonTypeDiffVariables(Context, ToIter, ToDefaultNonTypeDecl,
+ HasToInt, HasToValueDecl, ToNullPtr,
+ ToExpr, ToInt, ToValueDecl);
+
+ assert(((!HasFromInt && !HasToInt) ||
+ (!HasFromValueDecl && !HasToValueDecl)) &&
+ "Template argument cannot be both integer and declaration");
+
+ unsigned ParamWidth = 128; // Safe default
+ if (FromDefaultNonTypeDecl->getType()->isIntegralOrEnumerationType())
+ ParamWidth = Context.getIntWidth(FromDefaultNonTypeDecl->getType());
+
+ if (!HasFromInt && !HasToInt && !HasFromValueDecl && !HasToValueDecl) {
+ Tree.SetNode(FromExpr, ToExpr);
+ Tree.SetDefault(FromIter.isEnd() && FromExpr, ToIter.isEnd() && ToExpr);
+ if (FromDefaultNonTypeDecl->getType()->isIntegralOrEnumerationType()) {
+ if (FromExpr)
+ HasFromInt = GetInt(Context, FromIter, FromExpr, FromInt);
+ if (ToExpr)
+ HasToInt = GetInt(Context, ToIter, ToExpr, ToInt);
+ }
+ if (HasFromInt && HasToInt) {
+ Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
+ Tree.SetSame(IsSameConvertedInt(ParamWidth, FromInt, ToInt));
+ Tree.SetKind(DiffTree::Integer);
+ } else if (HasFromInt || HasToInt) {
+ Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
+ Tree.SetSame(false);
+ Tree.SetKind(DiffTree::Integer);
+ } else {
+ Tree.SetSame(IsEqualExpr(Context, ParamWidth, FromExpr, ToExpr) ||
+ (FromNullPtr && ToNullPtr));
+ Tree.SetNullPtr(FromNullPtr, ToNullPtr);
+ Tree.SetKind(DiffTree::Expression);
+ }
+ return;
+ }
+
+ if (HasFromInt || HasToInt) {
+ if (!HasFromInt && FromExpr)
+ HasFromInt = GetInt(Context, FromIter, FromExpr, FromInt);
+ if (!HasToInt && ToExpr)
+ HasToInt = GetInt(Context, ToIter, ToExpr, ToInt);
+ Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
+ Tree.SetSame(IsSameConvertedInt(ParamWidth, FromInt, ToInt));
+ Tree.SetDefault(FromIter.isEnd() && HasFromInt,
+ ToIter.isEnd() && HasToInt);
+ Tree.SetKind(DiffTree::Integer);
+ return;
+ }
+
+ if (!HasFromValueDecl && FromExpr)
+ FromValueDecl = GetValueDecl(FromIter, FromExpr);
+ if (!HasToValueDecl && ToExpr)
+ ToValueDecl = GetValueDecl(ToIter, ToExpr);
+
+ bool FromAddressOf =
+ NeedsAddressOf(FromValueDecl, FromExpr, FromDefaultNonTypeDecl);
+ bool ToAddressOf =
+ NeedsAddressOf(ToValueDecl, ToExpr, ToDefaultNonTypeDecl);
+
+ Tree.SetNullPtr(FromNullPtr, ToNullPtr);
+ Tree.SetNode(FromValueDecl, ToValueDecl, FromAddressOf, ToAddressOf);
+ Tree.SetSame(FromValueDecl && ToValueDecl &&
+ FromValueDecl->getCanonicalDecl() ==
+ ToValueDecl->getCanonicalDecl());
+ Tree.SetDefault(FromIter.isEnd() && FromValueDecl,
+ ToIter.isEnd() && ToValueDecl);
+ Tree.SetKind(DiffTree::Declaration);
+ }
+
/// DiffTemplate - recursively visits template arguments and stores the
/// argument info into a tree.
void DiffTemplate(const TemplateSpecializationType *FromTST,
@@ -894,191 +1078,33 @@ class TemplateDiff {
// Get the parameter at index TotalArgs. If index is larger
// than the total number of parameters, then there is an
// argument pack, so re-use the last parameter.
- unsigned ParamIndex = std::min(TotalArgs, ParamsFrom->size() - 1);
- NamedDecl *ParamND = ParamsFrom->getParam(ParamIndex);
-
- // Handle Types
- if (TemplateTypeParmDecl *DefaultTTPD =
- dyn_cast<TemplateTypeParmDecl>(ParamND)) {
- QualType FromType, ToType;
- FromType = GetType(FromIter, DefaultTTPD);
- // A forward declaration can have no default arg but the actual class
- // can, don't mix up iterators and get the original parameter.
- ToType = GetType(
- ToIter, cast<TemplateTypeParmDecl>(ParamsTo->getParam(ParamIndex)));
- Tree.SetNode(FromType, ToType);
- Tree.SetDefault(FromIter.isEnd() && !FromType.isNull(),
- ToIter.isEnd() && !ToType.isNull());
- Tree.SetKind(DiffTree::Type);
- if (!FromType.isNull() && !ToType.isNull()) {
- if (Context.hasSameType(FromType, ToType)) {
- Tree.SetSame(true);
- } else {
- Qualifiers FromQual = FromType.getQualifiers(),
- ToQual = ToType.getQualifiers();
- const TemplateSpecializationType *FromArgTST =
- GetTemplateSpecializationType(Context, FromType);
- const TemplateSpecializationType *ToArgTST =
- GetTemplateSpecializationType(Context, ToType);
-
- if (FromArgTST && ToArgTST &&
- hasSameTemplate(FromArgTST, ToArgTST)) {
- FromQual -= QualType(FromArgTST, 0).getQualifiers();
- ToQual -= QualType(ToArgTST, 0).getQualifiers();
- Tree.SetNode(FromArgTST->getTemplateName().getAsTemplateDecl(),
- ToArgTST->getTemplateName().getAsTemplateDecl());
- Tree.SetNode(FromQual, ToQual);
- Tree.SetKind(DiffTree::Template);
- DiffTemplate(FromArgTST, ToArgTST);
- }
- }
- }
- }
-
- // Handle Expressions
- if (NonTypeTemplateParmDecl *DefaultNTTPD =
- dyn_cast<NonTypeTemplateParmDecl>(ParamND)) {
- Expr *FromExpr = nullptr, *ToExpr = nullptr;
- llvm::APSInt FromInt, ToInt;
- ValueDecl *FromValueDecl = nullptr, *ToValueDecl = nullptr;
- unsigned ParamWidth = 128; // Safe default
- if (DefaultNTTPD->getType()->isIntegralOrEnumerationType())
- ParamWidth = Context.getIntWidth(DefaultNTTPD->getType());
- bool HasFromInt = !FromIter.isEnd() &&
- FromIter->getKind() == TemplateArgument::Integral;
- bool HasToInt = !ToIter.isEnd() &&
- ToIter->getKind() == TemplateArgument::Integral;
- bool HasFromValueDecl =
- !FromIter.isEnd() &&
- FromIter->getKind() == TemplateArgument::Declaration;
- bool HasToValueDecl =
- !ToIter.isEnd() &&
- ToIter->getKind() == TemplateArgument::Declaration;
- bool FromNullPtr = !FromIter.isEnd() &&
- FromIter->getKind() == TemplateArgument::NullPtr;
- bool ToNullPtr =
- !ToIter.isEnd() && ToIter->getKind() == TemplateArgument::NullPtr;
-
- assert(((!HasFromInt && !HasToInt) ||
- (!HasFromValueDecl && !HasToValueDecl)) &&
- "Template argument cannot be both integer and declaration");
-
- if (HasFromInt)
- FromInt = FromIter->getAsIntegral();
- else if (HasFromValueDecl)
- FromValueDecl = FromIter->getAsDecl();
- else if (!FromNullPtr)
- FromExpr = GetExpr(FromIter, DefaultNTTPD);
-
- if (HasToInt)
- ToInt = ToIter->getAsIntegral();
- else if (HasToValueDecl)
- ToValueDecl = ToIter->getAsDecl();
- else if (!ToNullPtr)
- ToExpr = GetExpr(ToIter, DefaultNTTPD);
-
- bool TemplateArgumentIsPointerType =
- DefaultNTTPD->getType()->isPointerType();
- if (FromExpr && TemplateArgumentIsPointerType) {
- FromNullPtr = CheckForNullPtr(FromExpr);
- }
- if (ToExpr && TemplateArgumentIsPointerType) {
- ToNullPtr = CheckForNullPtr(ToExpr);
- }
-
- if (!HasFromInt && !HasToInt && !HasFromValueDecl && !HasToValueDecl) {
- Tree.SetNode(FromExpr, ToExpr);
- Tree.SetDefault(FromIter.isEnd() && FromExpr,
- ToIter.isEnd() && ToExpr);
- if (DefaultNTTPD->getType()->isIntegralOrEnumerationType()) {
- if (FromExpr)
- HasFromInt = GetInt(FromIter, FromExpr, FromInt);
- if (ToExpr)
- HasToInt = GetInt(ToIter, ToExpr, ToInt);
- }
- if (HasFromInt && HasToInt) {
- Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
- Tree.SetSame(IsSameConvertedInt(ParamWidth, FromInt, ToInt));
- Tree.SetKind(DiffTree::Integer);
- } else if (HasFromInt || HasToInt) {
- Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
- Tree.SetSame(false);
- Tree.SetKind(DiffTree::Integer);
- } else {
- Tree.SetSame(IsEqualExpr(Context, ParamWidth, FromExpr, ToExpr) ||
- (FromNullPtr && ToNullPtr));
- Tree.SetNullPtr(FromNullPtr, ToNullPtr);
- Tree.SetKind(DiffTree::Expression);
- }
- } else if (HasFromInt || HasToInt) {
- if (!HasFromInt && FromExpr)
- HasFromInt = GetInt(FromIter, FromExpr, FromInt);
- if (!HasToInt && ToExpr)
- HasToInt = GetInt(ToIter, ToExpr, ToInt);
- Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
- Tree.SetSame(IsSameConvertedInt(ParamWidth, FromInt, ToInt));
- Tree.SetDefault(FromIter.isEnd() && HasFromInt,
- ToIter.isEnd() && HasToInt);
- Tree.SetKind(DiffTree::Integer);
- } else {
- if (!HasFromValueDecl && FromExpr)
- FromValueDecl = GetValueDecl(FromIter, FromExpr);
- if (!HasToValueDecl && ToExpr)
- ToValueDecl = GetValueDecl(ToIter, ToExpr);
- QualType ArgumentType = DefaultNTTPD->getType();
- bool FromAddressOf = false;
- if (FromValueDecl) {
- if (FromExpr) {
- if (UnaryOperator *UO =
- dyn_cast<UnaryOperator>(FromExpr->IgnoreParens())) {
- if (UO->getOpcode() == UO_AddrOf)
- FromAddressOf = true;
- }
- } else {
- if (!ArgumentType->isReferenceType()) {
- FromAddressOf = true;
- }
- }
- }
- bool ToAddressOf = false;
- if (ToValueDecl) {
- if (ToExpr) {
- if (UnaryOperator *UO =
- dyn_cast<UnaryOperator>(ToExpr->IgnoreParens())) {
- if (UO->getOpcode() == UO_AddrOf) {
- ToAddressOf = true;
- }
- }
- } else {
- if (!ArgumentType->isReferenceType()) {
- ToAddressOf = true;
- }
- }
- }
- Tree.SetNullPtr(FromNullPtr, ToNullPtr);
- Tree.SetNode(FromValueDecl, ToValueDecl, FromAddressOf, ToAddressOf);
- Tree.SetSame(FromValueDecl && ToValueDecl &&
- FromValueDecl->getCanonicalDecl() ==
- ToValueDecl->getCanonicalDecl());
- Tree.SetDefault(FromIter.isEnd() && FromValueDecl,
- ToIter.isEnd() && ToValueDecl);
- Tree.SetKind(DiffTree::Declaration);
- }
- }
-
- // Handle Templates
- if (TemplateTemplateParmDecl *DefaultTTPD =
- dyn_cast<TemplateTemplateParmDecl>(ParamND)) {
- TemplateDecl *FromDecl, *ToDecl;
- FromDecl = GetTemplateDecl(FromIter, DefaultTTPD);
- ToDecl = GetTemplateDecl(ToIter, DefaultTTPD);
- Tree.SetNode(FromDecl, ToDecl);
- Tree.SetSame(
- FromDecl && ToDecl &&
- FromDecl->getCanonicalDecl() == ToDecl->getCanonicalDecl());
- Tree.SetDefault(FromIter.isEnd() && FromDecl, ToIter.isEnd() && ToDecl);
- Tree.SetKind(DiffTree::TemplateTemplate);
- }
+ unsigned FromParamIndex = std::min(TotalArgs, ParamsFrom->size() - 1);
+ unsigned ToParamIndex = std::min(TotalArgs, ParamsTo->size() - 1);
+ NamedDecl *FromParamND = ParamsFrom->getParam(FromParamIndex);
+ NamedDecl *ToParamND = ParamsTo->getParam(ToParamIndex);
+
+ TemplateTypeParmDecl *FromDefaultTypeDecl =
+ dyn_cast<TemplateTypeParmDecl>(FromParamND);
+ TemplateTypeParmDecl *ToDefaultTypeDecl =
+ dyn_cast<TemplateTypeParmDecl>(ToParamND);
+ if (FromDefaultTypeDecl && ToDefaultTypeDecl)
+ DiffTypes(FromIter, ToIter, FromDefaultTypeDecl, ToDefaultTypeDecl);
+
+ TemplateTemplateParmDecl *FromDefaultTemplateDecl =
+ dyn_cast<TemplateTemplateParmDecl>(FromParamND);
+ TemplateTemplateParmDecl *ToDefaultTemplateDecl =
+ dyn_cast<TemplateTemplateParmDecl>(ToParamND);
+ if (FromDefaultTemplateDecl && ToDefaultTemplateDecl)
+ DiffTemplateTemplates(FromIter, ToIter, FromDefaultTemplateDecl,
+ ToDefaultTemplateDecl);
+
+ NonTypeTemplateParmDecl *FromDefaultNonTypeDecl =
+ dyn_cast<NonTypeTemplateParmDecl>(FromParamND);
+ NonTypeTemplateParmDecl *ToDefaultNonTypeDecl =
+ dyn_cast<NonTypeTemplateParmDecl>(ToParamND);
+ if (FromDefaultNonTypeDecl && ToDefaultNonTypeDecl)
+ DiffNonTypes(FromIter, ToIter, FromDefaultNonTypeDecl,
+ ToDefaultNonTypeDecl);
++FromIter;
++ToIter;
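[editor's note] For orientation, a hedged example of which helper each parameter slot reaches in the dispatch above (the template and the aliases are hypothetical, not taken from clang):

    // T is a type parameter              -> DiffTypes
    // N is a non-type parameter          -> DiffNonTypes
    // C is a template template parameter -> DiffTemplateTemplates
    template <typename...> struct VecLike {};
    template <typename...> struct ListLike {};

    template <typename T, int N, template <typename...> class C>
    struct Box {};

    using From = Box<int, 3, VecLike>;
    using To   = Box<long, 4, ListLike>;   // every slot differs from 'From'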
@@ -1147,7 +1173,8 @@ class TemplateDiff {
/// GetType - Retrieves the template type arguments, including default
/// arguments.
- QualType GetType(const TSTiterator &Iter, TemplateTypeParmDecl *DefaultTTPD) {
+ static QualType GetType(const TSTiterator &Iter,
+ TemplateTypeParmDecl *DefaultTTPD) {
bool isVariadic = DefaultTTPD->isParameterPack();
if (!Iter.isEnd())
@@ -1164,7 +1191,8 @@ class TemplateDiff {
/// GetExpr - Retrieves the template expression argument, including default
/// arguments.
- Expr *GetExpr(const TSTiterator &Iter, NonTypeTemplateParmDecl *DefaultNTTPD) {
+ static Expr *GetExpr(const TSTiterator &Iter,
+ NonTypeTemplateParmDecl *DefaultNTTPD) {
Expr *ArgExpr = nullptr;
bool isVariadic = DefaultNTTPD->isParameterPack();
@@ -1183,7 +1211,8 @@ class TemplateDiff {
/// GetInt - Retrieves the template integer argument, including evaluating
/// default arguments.
- bool GetInt(const TSTiterator &Iter, Expr *ArgExpr, llvm::APInt &Int) {
+ static bool GetInt(ASTContext &Context, const TSTiterator &Iter,
+ Expr *ArgExpr, llvm::APInt &Int) {
// Default, value-dependent expressions require fetching
// from the desugared TemplateArgument, otherwise expression needs to
// be evaluatable.
@@ -1209,7 +1238,7 @@ class TemplateDiff {
/// GetValueDecl - Retrieves the template Decl argument, including
/// default expression argument.
- ValueDecl *GetValueDecl(const TSTiterator &Iter, Expr *ArgExpr) {
+ static ValueDecl *GetValueDecl(const TSTiterator &Iter, Expr *ArgExpr) {
// Default, value-dependent expressions require fetching
// from the desugared TemplateArgument
if (Iter.isEnd() && ArgExpr->isValueDependent())
@@ -1235,7 +1264,7 @@ class TemplateDiff {
/// CheckForNullPtr - returns true if the expression can be evaluated as
/// a null pointer
- bool CheckForNullPtr(Expr *E) {
+ static bool CheckForNullPtr(ASTContext &Context, Expr *E) {
assert(E && "Expected expression");
E = E->IgnoreParenCasts();
@@ -1256,7 +1285,7 @@ class TemplateDiff {
/// GetTemplateDecl - Retrieves the template template arguments, including
/// default arguments.
- TemplateDecl *GetTemplateDecl(const TSTiterator &Iter,
+ static TemplateDecl *GetTemplateDecl(const TSTiterator &Iter,
TemplateTemplateParmDecl *DefaultTTPD) {
bool isVariadic = DefaultTTPD->isParameterPack();
diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp
index df7a2cb4712b..ebf5e651ef9a 100644
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"
@@ -90,24 +91,22 @@ namespace {
class ASTDumper
: public ConstDeclVisitor<ASTDumper>, public ConstStmtVisitor<ASTDumper>,
- public ConstCommentVisitor<ASTDumper> {
+ public ConstCommentVisitor<ASTDumper>, public TypeVisitor<ASTDumper> {
raw_ostream &OS;
const CommandTraits *Traits;
const SourceManager *SM;
- bool IsFirstLine;
- // Indicates whether more child are expected at the current tree depth
- enum IndentType { IT_Child, IT_LastChild };
+ /// Pending[i] is an action to dump an entity at level i.
+ llvm::SmallVector<std::function<void(bool isLastChild)>, 32> Pending;
- /// Indents[i] indicates if another child exists at level i.
- /// Used by Indent() to print the tree structure.
- llvm::SmallVector<IndentType, 32> Indents;
+ /// Indicates whether we're at the top level.
+ bool TopLevel;
- /// Indicates that more children will be needed at this indent level.
- /// If true, prevents lastChild() from marking the node as the last child.
- /// This is used when there are multiple collections of children to be
- /// dumped as well as during conditional node dumping.
- bool MoreChildren;
+ /// Indicates if we're handling the first child after entering a new depth.
+ bool FirstChild;
+
+ /// Prefix for currently-being-dumped entity.
+ std::string Prefix;
/// Keep track of the last location we print out so that we can
/// print out deltas from then on out.
@@ -119,21 +118,70 @@ namespace {
bool ShowColors;
- class IndentScope {
- ASTDumper &Dumper;
- // Preserve the Dumper's MoreChildren value from the previous IndentScope
- bool MoreChildren;
- public:
- IndentScope(ASTDumper &Dumper) : Dumper(Dumper) {
- MoreChildren = Dumper.hasMoreChildren();
- Dumper.setMoreChildren(false);
- Dumper.indent();
+ /// Dump a child of the current node.
+ template<typename Fn> void dumpChild(Fn doDumpChild) {
+ // If we're at the top level, there's nothing interesting to do; just
+ // run the dumper.
+ if (TopLevel) {
+ TopLevel = false;
+ doDumpChild();
+ while (!Pending.empty()) {
+ Pending.back()(true);
+ Pending.pop_back();
+ }
+ Prefix.clear();
+ OS << "\n";
+ TopLevel = true;
+ return;
}
- ~IndentScope() {
- Dumper.setMoreChildren(MoreChildren);
- Dumper.unindent();
+
+ const FullComment *OrigFC = FC;
+ auto dumpWithIndent = [this, doDumpChild, OrigFC](bool isLastChild) {
+ // Print out the appropriate tree structure and work out the prefix for
+ // children of this node. For instance:
+ //
+ // A Prefix = ""
+ // |-B Prefix = "| "
+ // | `-C Prefix = "| "
+ // `-D Prefix = " "
+ // |-E Prefix = " | "
+ // `-F Prefix = " "
+ // G Prefix = ""
+ //
+ // Note that the first level gets no prefix.
+ {
+ OS << '\n';
+ ColorScope Color(*this, IndentColor);
+ OS << Prefix << (isLastChild ? '`' : '|') << '-';
+ this->Prefix.push_back(isLastChild ? ' ' : '|');
+ this->Prefix.push_back(' ');
+ }
+
+ FirstChild = true;
+ unsigned Depth = Pending.size();
+
+ FC = OrigFC;
+ doDumpChild();
+
+ // If any children are left, they're the last at their nesting level.
+ // Dump those ones out now.
+ while (Depth < Pending.size()) {
+ Pending.back()(true);
+ this->Pending.pop_back();
+ }
+
+ // Restore the old prefix.
+ this->Prefix.resize(Prefix.size() - 2);
+ };
+
+ if (FirstChild) {
+ Pending.push_back(std::move(dumpWithIndent));
+ } else {
+ Pending.back()(false);
+ Pending.back() = std::move(dumpWithIndent);
}
- };
+ FirstChild = false;
+ }
class ColorScope {
ASTDumper &Dumper;
@@ -149,78 +197,37 @@ namespace {
}
};
- class ChildDumper {
- ASTDumper &Dumper;
-
- const Decl *Prev;
- bool PrevRef;
- public:
- ChildDumper(ASTDumper &Dumper) : Dumper(Dumper), Prev(nullptr) {}
- ~ChildDumper() {
- if (Prev) {
- Dumper.lastChild();
- dump(nullptr);
- }
- }
-
- // FIXME: This should take an arbitrary callable as the dumping action.
- void dump(const Decl *D, bool Ref = false) {
- if (Prev) {
- if (PrevRef)
- Dumper.dumpDeclRef(Prev);
- else
- Dumper.dumpDecl(Prev);
- }
- Prev = D;
- PrevRef = Ref;
- }
- void dumpRef(const Decl *D) { dump(D, true); }
-
- // Give up ownership of the children of the node. By calling this,
- // the caller takes back responsibility for calling lastChild().
- void release() { dump(nullptr); }
- };
-
public:
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM)
- : OS(OS), Traits(Traits), SM(SM), IsFirstLine(true), MoreChildren(false),
+ : OS(OS), Traits(Traits), SM(SM), TopLevel(true), FirstChild(true),
LastLocFilename(""), LastLocLine(~0U), FC(nullptr),
ShowColors(SM && SM->getDiagnostics().getShowColors()) { }
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM, bool ShowColors)
- : OS(OS), Traits(Traits), SM(SM), IsFirstLine(true), MoreChildren(false),
+ : OS(OS), Traits(Traits), SM(SM), TopLevel(true), FirstChild(true),
LastLocFilename(""), LastLocLine(~0U),
ShowColors(ShowColors) { }
- ~ASTDumper() {
- OS << "\n";
- }
-
void dumpDecl(const Decl *D);
void dumpStmt(const Stmt *S);
void dumpFullComment(const FullComment *C);
- // Formatting
- void indent();
- void unindent();
- void lastChild();
- bool hasMoreChildren();
- void setMoreChildren(bool Value);
-
// Utilities
void dumpPointer(const void *Ptr);
void dumpSourceRange(SourceRange R);
void dumpLocation(SourceLocation Loc);
- void dumpBareType(QualType T);
+ void dumpBareType(QualType T, bool Desugar = true);
void dumpType(QualType T);
+ void dumpTypeAsChild(QualType T);
+ void dumpTypeAsChild(const Type *T);
void dumpBareDeclRef(const Decl *Node);
void dumpDeclRef(const Decl *Node, const char *Label = nullptr);
void dumpName(const NamedDecl *D);
bool hasNodes(const DeclContext *DC);
void dumpDeclContext(const DeclContext *DC);
- void dumpLookups(const DeclContext *DC);
+ void dumpLookups(const DeclContext *DC, bool DumpDecls);
void dumpAttr(const Attr *A);
// C++ Utilities
@@ -233,6 +240,175 @@ namespace {
void dumpTemplateArgument(const TemplateArgument &A,
SourceRange R = SourceRange());
+ // Types
+ void VisitComplexType(const ComplexType *T) {
+ dumpTypeAsChild(T->getElementType());
+ }
+ void VisitPointerType(const PointerType *T) {
+ dumpTypeAsChild(T->getPointeeType());
+ }
+ void VisitBlockPointerType(const BlockPointerType *T) {
+ dumpTypeAsChild(T->getPointeeType());
+ }
+ void VisitReferenceType(const ReferenceType *T) {
+ dumpTypeAsChild(T->getPointeeType());
+ }
+ void VisitRValueReferenceType(const ReferenceType *T) {
+ if (T->isSpelledAsLValue())
+ OS << " written as lvalue reference";
+ VisitReferenceType(T);
+ }
+ void VisitMemberPointerType(const MemberPointerType *T) {
+ dumpTypeAsChild(T->getClass());
+ dumpTypeAsChild(T->getPointeeType());
+ }
+ void VisitArrayType(const ArrayType *T) {
+ switch (T->getSizeModifier()) {
+ case ArrayType::Normal: break;
+ case ArrayType::Static: OS << " static"; break;
+ case ArrayType::Star: OS << " *"; break;
+ }
+ OS << " " << T->getIndexTypeQualifiers().getAsString();
+ dumpTypeAsChild(T->getElementType());
+ }
+ void VisitConstantArrayType(const ConstantArrayType *T) {
+ OS << " " << T->getSize();
+ VisitArrayType(T);
+ }
+ void VisitVariableArrayType(const VariableArrayType *T) {
+ OS << " ";
+ dumpSourceRange(T->getBracketsRange());
+ VisitArrayType(T);
+ dumpStmt(T->getSizeExpr());
+ }
+ void VisitDependentSizedArrayType(const DependentSizedArrayType *T) {
+ VisitArrayType(T);
+ OS << " ";
+ dumpSourceRange(T->getBracketsRange());
+ dumpStmt(T->getSizeExpr());
+ }
+ void VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
+ OS << " ";
+ dumpLocation(T->getAttributeLoc());
+ dumpTypeAsChild(T->getElementType());
+ dumpStmt(T->getSizeExpr());
+ }
+ void VisitVectorType(const VectorType *T) {
+ switch (T->getVectorKind()) {
+ case VectorType::GenericVector: break;
+ case VectorType::AltiVecVector: OS << " altivec"; break;
+ case VectorType::AltiVecPixel: OS << " altivec pixel"; break;
+ case VectorType::AltiVecBool: OS << " altivec bool"; break;
+ case VectorType::NeonVector: OS << " neon"; break;
+ case VectorType::NeonPolyVector: OS << " neon poly"; break;
+ }
+ OS << " " << T->getNumElements();
+ dumpTypeAsChild(T->getElementType());
+ }
+ void VisitFunctionType(const FunctionType *T) {
+ auto EI = T->getExtInfo();
+ if (EI.getNoReturn()) OS << " noreturn";
+ if (EI.getProducesResult()) OS << " produces_result";
+ if (EI.getHasRegParm()) OS << " regparm " << EI.getRegParm();
+ OS << " " << FunctionType::getNameForCallConv(EI.getCC());
+ dumpTypeAsChild(T->getReturnType());
+ }
+ void VisitFunctionProtoType(const FunctionProtoType *T) {
+ auto EPI = T->getExtProtoInfo();
+ if (EPI.HasTrailingReturn) OS << " trailing_return";
+ if (T->isConst()) OS << " const";
+ if (T->isVolatile()) OS << " volatile";
+ if (T->isRestrict()) OS << " restrict";
+ switch (EPI.RefQualifier) {
+ case RQ_None: break;
+ case RQ_LValue: OS << " &"; break;
+ case RQ_RValue: OS << " &&"; break;
+ }
+ // FIXME: Exception specification.
+ // FIXME: Consumed parameters.
+ VisitFunctionType(T);
+ for (QualType PT : T->getParamTypes())
+ dumpTypeAsChild(PT);
+ if (EPI.Variadic)
+ dumpChild([=] { OS << "..."; });
+ }
+ void VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitTypedefType(const TypedefType *T) {
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitTypeOfExprType(const TypeOfExprType *T) {
+ dumpStmt(T->getUnderlyingExpr());
+ }
+ void VisitDecltypeType(const DecltypeType *T) {
+ dumpStmt(T->getUnderlyingExpr());
+ }
+ void VisitUnaryTransformType(const UnaryTransformType *T) {
+ switch (T->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ OS << " underlying_type";
+ break;
+ }
+ dumpTypeAsChild(T->getBaseType());
+ }
+ void VisitTagType(const TagType *T) {
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitAttributedType(const AttributedType *T) {
+ // FIXME: AttrKind
+ dumpTypeAsChild(T->getModifiedType());
+ }
+ void VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
+ OS << " depth " << T->getDepth() << " index " << T->getIndex();
+ if (T->isParameterPack()) OS << " pack";
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
+ dumpTypeAsChild(T->getReplacedParameter());
+ }
+ void VisitSubstTemplateTypeParmPackType(
+ const SubstTemplateTypeParmPackType *T) {
+ dumpTypeAsChild(T->getReplacedParameter());
+ dumpTemplateArgument(T->getArgumentPack());
+ }
+ void VisitAutoType(const AutoType *T) {
+ if (T->isDecltypeAuto()) OS << " decltype(auto)";
+ if (!T->isDeduced())
+ OS << " undeduced";
+ }
+ void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
+ if (T->isTypeAlias()) OS << " alias";
+ OS << " "; T->getTemplateName().dump(OS);
+ for (auto &Arg : *T)
+ dumpTemplateArgument(Arg);
+ if (T->isTypeAlias())
+ dumpTypeAsChild(T->getAliasedType());
+ }
+ void VisitInjectedClassNameType(const InjectedClassNameType *T) {
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
+ dumpTypeAsChild(T->getPointeeType());
+ }
+ void VisitAtomicType(const AtomicType *T) {
+ dumpTypeAsChild(T->getValueType());
+ }
+ void VisitAdjustedType(const AdjustedType *T) {
+ dumpTypeAsChild(T->getOriginalType());
+ }
+ void VisitPackExpansionType(const PackExpansionType *T) {
+ if (auto N = T->getNumExpansions()) OS << " expansions " << *N;
+ if (!T->isSugared())
+ dumpTypeAsChild(T->getPattern());
+ }
+ // FIXME: ElaboratedType, DependentNameType,
+ // DependentTemplateSpecializationType, ObjCObjectType
+
// Decls
void VisitLabelDecl(const LabelDecl *D);
void VisitTypedefDecl(const TypedefDecl *D);
@@ -255,8 +431,7 @@ namespace {
void VisitCXXRecordDecl(const CXXRecordDecl *D);
void VisitStaticAssertDecl(const StaticAssertDecl *D);
template<typename SpecializationDecl>
- void VisitTemplateDeclSpecialization(ChildDumper &Children,
- const SpecializationDecl *D,
+ void VisitTemplateDeclSpecialization(const SpecializationDecl *D,
bool DumpExplicitInst,
bool DumpRefOnly);
template<typename TemplateDecl>
@@ -378,67 +553,6 @@ namespace {
// Utilities
//===----------------------------------------------------------------------===//
-// Print out the appropriate tree structure using the Indents vector.
-// Example of tree and the Indents vector at each level.
-// A { }
-// |-B { IT_Child }
-// | `-C { IT_Child, IT_LastChild }
-// `-D { IT_LastChild }
-// |-E { IT_LastChild, IT_Child }
-// `-F { IT_LastChild, IT_LastChild }
-// Type non-last element, last element
-// IT_Child "| " "|-"
-// IT_LastChild " " "`-"
-void ASTDumper::indent() {
- if (IsFirstLine)
- IsFirstLine = false;
- else
- OS << "\n";
-
- ColorScope Color(*this, IndentColor);
- for (SmallVectorImpl<IndentType>::const_iterator I = Indents.begin(),
- E = Indents.end();
- I != E; ++I) {
- switch (*I) {
- case IT_Child:
- if (I == E - 1)
- OS << "|-";
- else
- OS << "| ";
- continue;
- case IT_LastChild:
- if (I == E - 1)
- OS << "`-";
- else
- OS << " ";
- continue;
- }
- llvm_unreachable("Invalid IndentType");
- }
- Indents.push_back(IT_Child);
-}
-
-void ASTDumper::unindent() {
- Indents.pop_back();
-}
-
-// Call before each potential last child node is to be dumped. If MoreChildren
-// is false, then this is the last child, otherwise treat as a regular node.
-void ASTDumper::lastChild() {
- if (!hasMoreChildren())
- Indents.back() = IT_LastChild;
-}
-
-// MoreChildren should be set before calling another function that may print
-// additional nodes to prevent conflicting final child nodes.
-bool ASTDumper::hasMoreChildren() {
- return MoreChildren;
-}
-
-void ASTDumper::setMoreChildren(bool Value) {
- MoreChildren = Value;
-}
-
void ASTDumper::dumpPointer(const void *Ptr) {
ColorScope Color(*this, AddressColor);
OS << ' ' << Ptr;
@@ -491,13 +605,13 @@ void ASTDumper::dumpSourceRange(SourceRange R) {
}
-void ASTDumper::dumpBareType(QualType T) {
+void ASTDumper::dumpBareType(QualType T, bool Desugar) {
ColorScope Color(*this, TypeColor);
-
+
SplitQualType T_split = T.split();
OS << "'" << QualType::getAsString(T_split) << "'";
- if (!T.isNull()) {
+ if (Desugar && !T.isNull()) {
// If the type is sugared, also dump a (shallow) desugared type.
SplitQualType D_split = T.getSplitDesugaredType();
if (T_split != D_split)
@@ -510,6 +624,59 @@ void ASTDumper::dumpType(QualType T) {
dumpBareType(T);
}
+void ASTDumper::dumpTypeAsChild(QualType T) {
+ SplitQualType SQT = T.split();
+ if (!SQT.Quals.hasQualifiers())
+ return dumpTypeAsChild(SQT.Ty);
+
+ dumpChild([=] {
+ OS << "QualType";
+ dumpPointer(T.getAsOpaquePtr());
+ OS << " ";
+ dumpBareType(T, false);
+ OS << " " << T.split().Quals.getAsString();
+ dumpTypeAsChild(T.split().Ty);
+ });
+}
+
+void ASTDumper::dumpTypeAsChild(const Type *T) {
+ dumpChild([=] {
+ if (!T) {
+ ColorScope Color(*this, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+
+ {
+ ColorScope Color(*this, TypeColor);
+ OS << T->getTypeClassName() << "Type";
+ }
+ dumpPointer(T);
+ OS << " ";
+ dumpBareType(QualType(T, 0), false);
+
+ QualType SingleStepDesugar =
+ T->getLocallyUnqualifiedSingleStepDesugaredType();
+ if (SingleStepDesugar != QualType(T, 0))
+ OS << " sugar";
+ if (T->isDependentType())
+ OS << " dependent";
+ else if (T->isInstantiationDependentType())
+ OS << " instantiation_dependent";
+ if (T->isVariablyModifiedType())
+ OS << " variably_modified";
+ if (T->containsUnexpandedParameterPack())
+ OS << " contains_unexpanded_pack";
+ if (T->isFromAST())
+ OS << " imported";
+
+ TypeVisitor<ASTDumper>::Visit(T);
+
+ if (SingleStepDesugar != QualType(T, 0))
+ dumpTypeAsChild(SingleStepDesugar);
+ });
+}
+
void ASTDumper::dumpBareDeclRef(const Decl *D) {
{
ColorScope Color(*this, DeclKindNameColor);
@@ -530,10 +697,11 @@ void ASTDumper::dumpDeclRef(const Decl *D, const char *Label) {
if (!D)
return;
- IndentScope Indent(*this);
- if (Label)
- OS << Label << ' ';
- dumpBareDeclRef(D);
+ dumpChild([=]{
+ if (Label)
+ OS << Label << ' ';
+ dumpBareDeclRef(D);
+ });
}
void ASTDumper::dumpName(const NamedDecl *ND) {
@@ -555,86 +723,96 @@ void ASTDumper::dumpDeclContext(const DeclContext *DC) {
if (!DC)
return;
- ChildDumper Children(*this);
for (auto *D : DC->noload_decls())
- Children.dump(D);
+ dumpDecl(D);
if (DC->hasExternalLexicalStorage()) {
- Children.release();
-
- lastChild();
- IndentScope Indent(*this);
- ColorScope Color(*this, UndeserializedColor);
- OS << "<undeserialized declarations>";
+ dumpChild([=]{
+ ColorScope Color(*this, UndeserializedColor);
+ OS << "<undeserialized declarations>";
+ });
}
}
-void ASTDumper::dumpLookups(const DeclContext *DC) {
- IndentScope Indent(*this);
+void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
+ dumpChild([=] {
+ OS << "StoredDeclsMap ";
+ dumpBareDeclRef(cast<Decl>(DC));
- OS << "StoredDeclsMap ";
- dumpBareDeclRef(cast<Decl>(DC));
+ const DeclContext *Primary = DC->getPrimaryContext();
+ if (Primary != DC) {
+ OS << " primary";
+ dumpPointer(cast<Decl>(Primary));
+ }
- const DeclContext *Primary = DC->getPrimaryContext();
- if (Primary != DC) {
- OS << " primary";
- dumpPointer(cast<Decl>(Primary));
- }
+ bool HasUndeserializedLookups = Primary->hasExternalVisibleStorage();
- bool HasUndeserializedLookups = Primary->hasExternalVisibleStorage();
+ DeclContext::all_lookups_iterator I = Primary->noload_lookups_begin(),
+ E = Primary->noload_lookups_end();
+ while (I != E) {
+ DeclarationName Name = I.getLookupName();
+ DeclContextLookupResult R = *I++;
- DeclContext::all_lookups_iterator I = Primary->noload_lookups_begin(),
- E = Primary->noload_lookups_end();
- while (I != E) {
- DeclarationName Name = I.getLookupName();
- DeclContextLookupResult R = *I++;
- if (I == E && !HasUndeserializedLookups)
- lastChild();
+ dumpChild([=] {
+ OS << "DeclarationName ";
+ {
+ ColorScope Color(*this, DeclNameColor);
+ OS << '\'' << Name << '\'';
+ }
- IndentScope Indent(*this);
- OS << "DeclarationName ";
- {
- ColorScope Color(*this, DeclNameColor);
- OS << '\'' << Name << '\'';
+ for (DeclContextLookupResult::iterator RI = R.begin(), RE = R.end();
+ RI != RE; ++RI) {
+ dumpChild([=] {
+ dumpBareDeclRef(*RI);
+
+ if ((*RI)->isHidden())
+ OS << " hidden";
+
+ // If requested, dump the redecl chain for this lookup.
+ if (DumpDecls) {
+ // Dump earliest decl first.
+ std::function<void(Decl *)> DumpWithPrev = [&](Decl *D) {
+ if (Decl *Prev = D->getPreviousDecl())
+ DumpWithPrev(Prev);
+ dumpDecl(D);
+ };
+ DumpWithPrev(*RI);
+ }
+ });
+ }
+ });
}
- for (DeclContextLookupResult::iterator RI = R.begin(), RE = R.end();
- RI != RE; ++RI) {
- if (RI + 1 == RE)
- lastChild();
- dumpDeclRef(*RI);
- if ((*RI)->isHidden())
- OS << " hidden";
+ if (HasUndeserializedLookups) {
+ dumpChild([=] {
+ ColorScope Color(*this, UndeserializedColor);
+ OS << "<undeserialized lookups>";
+ });
}
- }
-
- if (HasUndeserializedLookups) {
- lastChild();
- IndentScope Indent(*this);
- ColorScope Color(*this, UndeserializedColor);
- OS << "<undeserialized lookups>";
- }
+ });
}
void ASTDumper::dumpAttr(const Attr *A) {
- IndentScope Indent(*this);
- {
- ColorScope Color(*this, AttrColor);
+ dumpChild([=] {
+ {
+ ColorScope Color(*this, AttrColor);
- switch (A->getKind()) {
+ switch (A->getKind()) {
#define ATTR(X) case attr::X: OS << #X; break;
#include "clang/Basic/AttrList.inc"
- default: llvm_unreachable("unexpected attribute kind");
+ default:
+ llvm_unreachable("unexpected attribute kind");
+ }
+ OS << "Attr";
}
- OS << "Attr";
- }
- dumpPointer(A);
- dumpSourceRange(A->getRange());
- if (A->isInherited())
- OS << " Inherited";
- if (A->isImplicit())
- OS << " Implicit";
+ dumpPointer(A);
+ dumpSourceRange(A->getRange());
+ if (A->isInherited())
+ OS << " Inherited";
+ if (A->isImplicit())
+ OS << " Implicit";
#include "clang/AST/AttrDump.inc"
+ });
}
static void dumpPreviousDeclImpl(raw_ostream &OS, ...) {}
@@ -687,15 +865,20 @@ void ASTDumper::dumpAccessSpecifier(AccessSpecifier AS) {
}
void ASTDumper::dumpCXXCtorInitializer(const CXXCtorInitializer *Init) {
- IndentScope Indent(*this);
- OS << "CXXCtorInitializer";
- if (Init->isAnyMemberInitializer()) {
- OS << ' ';
- dumpBareDeclRef(Init->getAnyMember());
- } else {
- dumpType(QualType(Init->getBaseClass(), 0));
- }
- dumpStmt(Init->getInit());
+ dumpChild([=] {
+ OS << "CXXCtorInitializer";
+ if (Init->isAnyMemberInitializer()) {
+ OS << ' ';
+ dumpBareDeclRef(Init->getAnyMember());
+ } else if (Init->isBaseInitializer()) {
+ dumpType(QualType(Init->getBaseClass(), 0));
+ } else if (Init->isDelegatingInitializer()) {
+ dumpType(Init->getTypeSourceInfo()->getType());
+ } else {
+ llvm_unreachable("Unknown initializer type");
+ }
+ dumpStmt(Init->getInit());
+ });
}
void ASTDumper::dumpTemplateParameters(const TemplateParameterList *TPL) {
@@ -709,11 +892,8 @@ void ASTDumper::dumpTemplateParameters(const TemplateParameterList *TPL) {
void ASTDumper::dumpTemplateArgumentListInfo(
const TemplateArgumentListInfo &TALI) {
- for (unsigned i = 0, e = TALI.size(); i < e; ++i) {
- if (i + 1 == e)
- lastChild();
+ for (unsigned i = 0, e = TALI.size(); i < e; ++i)
dumpTemplateArgumentLoc(TALI[i]);
- }
}
void ASTDumper::dumpTemplateArgumentLoc(const TemplateArgumentLoc &A) {
@@ -726,54 +906,49 @@ void ASTDumper::dumpTemplateArgumentList(const TemplateArgumentList &TAL) {
}
void ASTDumper::dumpTemplateArgument(const TemplateArgument &A, SourceRange R) {
- IndentScope Indent(*this);
- OS << "TemplateArgument";
- if (R.isValid())
- dumpSourceRange(R);
-
- switch (A.getKind()) {
- case TemplateArgument::Null:
- OS << " null";
- break;
- case TemplateArgument::Type:
- OS << " type";
- lastChild();
- dumpType(A.getAsType());
- break;
- case TemplateArgument::Declaration:
- OS << " decl";
- lastChild();
- dumpDeclRef(A.getAsDecl());
- break;
- case TemplateArgument::NullPtr:
- OS << " nullptr";
- break;
- case TemplateArgument::Integral:
- OS << " integral " << A.getAsIntegral();
- break;
- case TemplateArgument::Template:
- OS << " template ";
- A.getAsTemplate().dump(OS);
- break;
- case TemplateArgument::TemplateExpansion:
- OS << " template expansion";
- A.getAsTemplateOrTemplatePattern().dump(OS);
- break;
- case TemplateArgument::Expression:
- OS << " expr";
- lastChild();
- dumpStmt(A.getAsExpr());
- break;
- case TemplateArgument::Pack:
- OS << " pack";
- for (TemplateArgument::pack_iterator I = A.pack_begin(), E = A.pack_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
- dumpTemplateArgument(*I);
+ dumpChild([=] {
+ OS << "TemplateArgument";
+ if (R.isValid())
+ dumpSourceRange(R);
+
+ switch (A.getKind()) {
+ case TemplateArgument::Null:
+ OS << " null";
+ break;
+ case TemplateArgument::Type:
+ OS << " type";
+ dumpType(A.getAsType());
+ break;
+ case TemplateArgument::Declaration:
+ OS << " decl";
+ dumpDeclRef(A.getAsDecl());
+ break;
+ case TemplateArgument::NullPtr:
+ OS << " nullptr";
+ break;
+ case TemplateArgument::Integral:
+ OS << " integral " << A.getAsIntegral();
+ break;
+ case TemplateArgument::Template:
+ OS << " template ";
+ A.getAsTemplate().dump(OS);
+ break;
+ case TemplateArgument::TemplateExpansion:
+ OS << " template expansion";
+ A.getAsTemplateOrTemplatePattern().dump(OS);
+ break;
+ case TemplateArgument::Expression:
+ OS << " expr";
+ dumpStmt(A.getAsExpr());
+ break;
+ case TemplateArgument::Pack:
+ OS << " pack";
+ for (TemplateArgument::pack_iterator I = A.pack_begin(), E = A.pack_end();
+ I != E; ++I)
+ dumpTemplateArgument(*I);
+ break;
}
- break;
- }
+ });
}
//===----------------------------------------------------------------------===//
@@ -781,64 +956,57 @@ void ASTDumper::dumpTemplateArgument(const TemplateArgument &A, SourceRange R) {
//===----------------------------------------------------------------------===//
void ASTDumper::dumpDecl(const Decl *D) {
- IndentScope Indent(*this);
+ dumpChild([=] {
+ if (!D) {
+ ColorScope Color(*this, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
- if (!D) {
- ColorScope Color(*this, NullColor);
- OS << "<<<NULL>>>";
- return;
- }
+ {
+ ColorScope Color(*this, DeclKindNameColor);
+ OS << D->getDeclKindName() << "Decl";
+ }
+ dumpPointer(D);
+ if (D->getLexicalDeclContext() != D->getDeclContext())
+ OS << " parent " << cast<Decl>(D->getDeclContext());
+ dumpPreviousDecl(OS, D);
+ dumpSourceRange(D->getSourceRange());
+ OS << ' ';
+ dumpLocation(D->getLocation());
+ if (Module *M = D->getOwningModule())
+ OS << " in " << M->getFullModuleName();
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (ND->isHidden())
+ OS << " hidden";
+ if (D->isImplicit())
+ OS << " implicit";
+ if (D->isUsed())
+ OS << " used";
+ else if (D->isThisDeclarationReferenced())
+ OS << " referenced";
+ if (D->isInvalidDecl())
+ OS << " invalid";
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isConstexpr())
+ OS << " constexpr";
- {
- ColorScope Color(*this, DeclKindNameColor);
- OS << D->getDeclKindName() << "Decl";
- }
- dumpPointer(D);
- if (D->getLexicalDeclContext() != D->getDeclContext())
- OS << " parent " << cast<Decl>(D->getDeclContext());
- dumpPreviousDecl(OS, D);
- dumpSourceRange(D->getSourceRange());
- OS << ' ';
- dumpLocation(D->getLocation());
- if (Module *M = D->getOwningModule())
- OS << " in " << M->getFullModuleName();
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
- if (ND->isHidden())
- OS << " hidden";
- if (D->isImplicit())
- OS << " implicit";
- if (D->isUsed())
- OS << " used";
- else if (D->isReferenced())
- OS << " referenced";
- if (D->isInvalidDecl())
- OS << " invalid";
-
- bool HasAttrs = D->hasAttrs();
- const FullComment *Comment =
- D->getASTContext().getLocalCommentForDeclUncached(D);
- // Decls within functions are visited by the body
- bool HasDeclContext = !isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D) &&
- hasNodes(dyn_cast<DeclContext>(D));
-
- setMoreChildren(HasAttrs || Comment || HasDeclContext);
- ConstDeclVisitor<ASTDumper>::Visit(D);
-
- setMoreChildren(Comment || HasDeclContext);
- for (Decl::attr_iterator I = D->attr_begin(), E = D->attr_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
- dumpAttr(*I);
- }
- setMoreChildren(HasDeclContext);
- lastChild();
- dumpFullComment(Comment);
+ ConstDeclVisitor<ASTDumper>::Visit(D);
- setMoreChildren(false);
- if (HasDeclContext)
- dumpDeclContext(cast<DeclContext>(D));
+ for (Decl::attr_iterator I = D->attr_begin(), E = D->attr_end(); I != E;
+ ++I)
+ dumpAttr(*I);
+
+ if (const FullComment *Comment =
+ D->getASTContext().getLocalCommentForDeclUncached(D))
+ dumpFullComment(Comment);
+
+ // Decls within functions are visited by the body.
+ if (!isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D) &&
+ hasNodes(dyn_cast<DeclContext>(D)))
+ dumpDeclContext(cast<DeclContext>(D));
+ });
}
void ASTDumper::VisitLabelDecl(const LabelDecl *D) {
@@ -878,19 +1046,16 @@ void ASTDumper::VisitRecordDecl(const RecordDecl *D) {
void ASTDumper::VisitEnumConstantDecl(const EnumConstantDecl *D) {
dumpName(D);
dumpType(D->getType());
- if (const Expr *Init = D->getInitExpr()) {
- lastChild();
+ if (const Expr *Init = D->getInitExpr())
dumpStmt(Init);
- }
}
void ASTDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) {
dumpName(D);
dumpType(D->getType());
- ChildDumper Children(*this);
for (auto *Child : D->chain())
- Children.dumpRef(Child);
+ dumpDeclRef(Child);
}
void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
@@ -914,73 +1079,39 @@ void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
if (const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>()) {
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
- switch (EPI.ExceptionSpecType) {
+ switch (EPI.ExceptionSpec.Type) {
default: break;
case EST_Unevaluated:
- OS << " noexcept-unevaluated " << EPI.ExceptionSpecDecl;
+ OS << " noexcept-unevaluated " << EPI.ExceptionSpec.SourceDecl;
break;
case EST_Uninstantiated:
- OS << " noexcept-uninstantiated " << EPI.ExceptionSpecTemplate;
+ OS << " noexcept-uninstantiated " << EPI.ExceptionSpec.SourceTemplate;
break;
}
}
- bool OldMoreChildren = hasMoreChildren();
- const FunctionTemplateSpecializationInfo *FTSI =
- D->getTemplateSpecializationInfo();
- bool HasTemplateSpecialization = FTSI;
-
- bool HasNamedDecls = D->getDeclsInPrototypeScope().begin() !=
- D->getDeclsInPrototypeScope().end();
-
- bool HasFunctionDecls = D->param_begin() != D->param_end();
-
- const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(D);
- bool HasCtorInitializers = C && C->init_begin() != C->init_end();
-
- bool HasDeclarationBody = D->doesThisDeclarationHaveABody();
-
- setMoreChildren(OldMoreChildren || HasNamedDecls || HasFunctionDecls ||
- HasCtorInitializers || HasDeclarationBody);
- if (HasTemplateSpecialization) {
- lastChild();
+ if (const FunctionTemplateSpecializationInfo *FTSI =
+ D->getTemplateSpecializationInfo())
dumpTemplateArgumentList(*FTSI->TemplateArguments);
- }
- setMoreChildren(OldMoreChildren || HasFunctionDecls ||
- HasCtorInitializers || HasDeclarationBody);
for (ArrayRef<NamedDecl *>::iterator
I = D->getDeclsInPrototypeScope().begin(),
- E = D->getDeclsInPrototypeScope().end(); I != E; ++I) {
- if (I + 1 == E)
- lastChild();
+ E = D->getDeclsInPrototypeScope().end(); I != E; ++I)
dumpDecl(*I);
- }
- setMoreChildren(OldMoreChildren || HasCtorInitializers || HasDeclarationBody);
for (FunctionDecl::param_const_iterator I = D->param_begin(),
E = D->param_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
+ I != E; ++I)
dumpDecl(*I);
- }
-
- setMoreChildren(OldMoreChildren || HasDeclarationBody);
- if (HasCtorInitializers)
+
+ if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(D))
for (CXXConstructorDecl::init_const_iterator I = C->init_begin(),
E = C->init_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
+ I != E; ++I)
dumpCXXCtorInitializer(*I);
- }
- setMoreChildren(OldMoreChildren);
- if (HasDeclarationBody) {
- lastChild();
+ if (D->doesThisDeclarationHaveABody())
dumpStmt(D->getBody());
- }
}
void ASTDumper::VisitFieldDecl(const FieldDecl *D) {
@@ -991,21 +1122,10 @@ void ASTDumper::VisitFieldDecl(const FieldDecl *D) {
if (D->isModulePrivate())
OS << " __module_private__";
- bool OldMoreChildren = hasMoreChildren();
- bool IsBitField = D->isBitField();
- Expr *Init = D->getInClassInitializer();
- bool HasInit = Init;
-
- setMoreChildren(OldMoreChildren || HasInit);
- if (IsBitField) {
- lastChild();
+ if (D->isBitField())
dumpStmt(D->getBitWidth());
- }
- setMoreChildren(OldMoreChildren);
- if (HasInit) {
- lastChild();
+ if (Expr *Init = D->getInClassInitializer())
dumpStmt(Init);
- }
}
void ASTDumper::VisitVarDecl(const VarDecl *D) {
@@ -1029,13 +1149,11 @@ void ASTDumper::VisitVarDecl(const VarDecl *D) {
case VarDecl::CallInit: OS << " callinit"; break;
case VarDecl::ListInit: OS << " listinit"; break;
}
- lastChild();
dumpStmt(D->getInit());
}
}
void ASTDumper::VisitFileScopeAsmDecl(const FileScopeAsmDecl *D) {
- lastChild();
dumpStmt(D->getAsmString());
}
@@ -1082,25 +1200,24 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
return;
for (const auto &I : D->bases()) {
- IndentScope Indent(*this);
- if (I.isVirtual())
- OS << "virtual ";
- dumpAccessSpecifier(I.getAccessSpecifier());
- dumpType(I.getType());
- if (I.isPackExpansion())
- OS << "...";
+ dumpChild([=] {
+ if (I.isVirtual())
+ OS << "virtual ";
+ dumpAccessSpecifier(I.getAccessSpecifier());
+ dumpType(I.getType());
+ if (I.isPackExpansion())
+ OS << "...";
+ });
}
}
void ASTDumper::VisitStaticAssertDecl(const StaticAssertDecl *D) {
dumpStmt(D->getAssertExpr());
- lastChild();
dumpStmt(D->getMessage());
}
template<typename SpecializationDecl>
-void ASTDumper::VisitTemplateDeclSpecialization(ChildDumper &Children,
- const SpecializationDecl *D,
+void ASTDumper::VisitTemplateDeclSpecialization(const SpecializationDecl *D,
bool DumpExplicitInst,
bool DumpRefOnly) {
bool DumpedAny = false;
@@ -1125,7 +1242,10 @@ void ASTDumper::VisitTemplateDeclSpecialization(ChildDumper &Children,
// Fall through.
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
- Children.dump(Redecl, DumpRefOnly);
+ if (DumpRefOnly)
+ dumpDeclRef(Redecl);
+ else
+ dumpDecl(Redecl);
DumpedAny = true;
break;
case TSK_ExplicitSpecialization:
@@ -1135,7 +1255,7 @@ void ASTDumper::VisitTemplateDeclSpecialization(ChildDumper &Children,
// Ensure we dump at least one decl for each specialization.
if (!DumpedAny)
- Children.dumpRef(D);
+ dumpDeclRef(D);
}
template<typename TemplateDecl>
@@ -1144,11 +1264,10 @@ void ASTDumper::VisitTemplateDecl(const TemplateDecl *D,
dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
- ChildDumper Children(*this);
- Children.dump(D->getTemplatedDecl());
+ dumpDecl(D->getTemplatedDecl());
for (auto *Child : D->specializations())
- VisitTemplateDeclSpecialization(Children, Child, DumpExplicitInst,
+ VisitTemplateDeclSpecialization(Child, DumpExplicitInst,
!D->isCanonicalDecl());
}
@@ -1206,10 +1325,8 @@ void ASTDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
if (D->isParameterPack())
OS << " ...";
dumpName(D);
- if (D->hasDefaultArgument()) {
- lastChild();
+ if (D->hasDefaultArgument())
dumpTemplateArgument(D->getDefaultArgument());
- }
}
void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) {
@@ -1217,10 +1334,8 @@ void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) {
if (D->isParameterPack())
OS << " ...";
dumpName(D);
- if (D->hasDefaultArgument()) {
- lastChild();
+ if (D->hasDefaultArgument())
dumpTemplateArgument(D->getDefaultArgument());
- }
}
void ASTDumper::VisitTemplateTemplateParmDecl(
@@ -1229,10 +1344,8 @@ void ASTDumper::VisitTemplateTemplateParmDecl(
OS << " ...";
dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
- if (D->hasDefaultArgument()) {
- lastChild();
+ if (D->hasDefaultArgument())
dumpTemplateArgumentLoc(D->getDefaultArgument());
- }
}
void ASTDumper::VisitUsingDecl(const UsingDecl *D) {
@@ -1273,7 +1386,6 @@ void ASTDumper::VisitAccessSpecDecl(const AccessSpecDecl *D) {
}
void ASTDumper::VisitFriendDecl(const FriendDecl *D) {
- lastChild();
if (TypeSourceInfo *T = D->getFriendType())
dumpType(T->getType());
else
@@ -1317,96 +1429,66 @@ void ASTDumper::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
dumpName(D);
dumpType(D->getReturnType());
- bool OldMoreChildren = hasMoreChildren();
- bool IsVariadic = D->isVariadic();
- bool HasBody = D->hasBody();
-
- setMoreChildren(OldMoreChildren || IsVariadic || HasBody);
if (D->isThisDeclarationADefinition()) {
- lastChild();
dumpDeclContext(D);
} else {
for (ObjCMethodDecl::param_const_iterator I = D->param_begin(),
E = D->param_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
+ I != E; ++I)
dumpDecl(*I);
- }
}
- setMoreChildren(OldMoreChildren || HasBody);
- if (IsVariadic) {
- lastChild();
- IndentScope Indent(*this);
- OS << "...";
- }
+ if (D->isVariadic())
+ dumpChild([=] { OS << "..."; });
- setMoreChildren(OldMoreChildren);
- if (HasBody) {
- lastChild();
+ if (D->hasBody())
dumpStmt(D->getBody());
- }
}
void ASTDumper::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
dumpName(D);
dumpDeclRef(D->getClassInterface());
- if (D->protocol_begin() == D->protocol_end())
- lastChild();
dumpDeclRef(D->getImplementation());
for (ObjCCategoryDecl::protocol_iterator I = D->protocol_begin(),
E = D->protocol_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
+ I != E; ++I)
dumpDeclRef(*I);
- }
}
void ASTDumper::VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D) {
dumpName(D);
dumpDeclRef(D->getClassInterface());
- lastChild();
dumpDeclRef(D->getCategoryDecl());
}
void ASTDumper::VisitObjCProtocolDecl(const ObjCProtocolDecl *D) {
dumpName(D);
- ChildDumper Children(*this);
for (auto *Child : D->protocols())
- Children.dumpRef(Child);
+ dumpDeclRef(Child);
}
void ASTDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
dumpName(D);
dumpDeclRef(D->getSuperClass(), "super");
- ChildDumper Children(*this);
- Children.dumpRef(D->getImplementation());
+ dumpDeclRef(D->getImplementation());
for (auto *Child : D->protocols())
- Children.dumpRef(Child);
+ dumpDeclRef(Child);
}
void ASTDumper::VisitObjCImplementationDecl(const ObjCImplementationDecl *D) {
dumpName(D);
dumpDeclRef(D->getSuperClass(), "super");
- if (D->init_begin() == D->init_end())
- lastChild();
dumpDeclRef(D->getClassInterface());
for (ObjCImplementationDecl::init_const_iterator I = D->init_begin(),
E = D->init_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
+ I != E; ++I)
dumpCXXCtorInitializer(*I);
- }
}
void ASTDumper::VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D) {
dumpName(D);
- lastChild();
dumpDeclRef(D->getClassInterface());
}
@@ -1441,15 +1523,10 @@ void ASTDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
OS << " strong";
if (Attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained)
OS << " unsafe_unretained";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_getter) {
- if (!(Attrs & ObjCPropertyDecl::OBJC_PR_setter))
- lastChild();
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_getter)
dumpDeclRef(D->getGetterMethodDecl(), "getter");
- }
- if (Attrs & ObjCPropertyDecl::OBJC_PR_setter) {
- lastChild();
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_setter)
dumpDeclRef(D->getSetterMethodDecl(), "setter");
- }
}
}
@@ -1460,7 +1537,6 @@ void ASTDumper::VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D) {
else
OS << " dynamic";
dumpDeclRef(D->getPropertyDecl());
- lastChild();
dumpDeclRef(D->getPropertyIvarDecl());
}
@@ -1468,30 +1544,27 @@ void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
for (auto I : D->params())
dumpDecl(I);
- if (D->isVariadic()) {
- IndentScope Indent(*this);
- OS << "...";
- }
+ if (D->isVariadic())
+ dumpChild([=]{ OS << "..."; });
+
+ if (D->capturesCXXThis())
+ dumpChild([=]{ OS << "capture this"; });
- if (D->capturesCXXThis()) {
- IndentScope Indent(*this);
- OS << "capture this";
- }
for (const auto &I : D->captures()) {
- IndentScope Indent(*this);
- OS << "capture";
- if (I.isByRef())
- OS << " byref";
- if (I.isNested())
- OS << " nested";
- if (I.getVariable()) {
- OS << ' ';
- dumpBareDeclRef(I.getVariable());
- }
- if (I.hasCopyExpr())
- dumpStmt(I.getCopyExpr());
+ dumpChild([=] {
+ OS << "capture";
+ if (I.isByRef())
+ OS << " byref";
+ if (I.isNested())
+ OS << " nested";
+ if (I.getVariable()) {
+ OS << ' ';
+ dumpBareDeclRef(I.getVariable());
+ }
+ if (I.hasCopyExpr())
+ dumpStmt(I.getCopyExpr());
+ });
}
- lastChild();
dumpStmt(D->getBody());
}
@@ -1500,29 +1573,23 @@ void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
//===----------------------------------------------------------------------===//
void ASTDumper::dumpStmt(const Stmt *S) {
- IndentScope Indent(*this);
+ dumpChild([=] {
+ if (!S) {
+ ColorScope Color(*this, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
- if (!S) {
- ColorScope Color(*this, NullColor);
- OS << "<<<NULL>>>";
- return;
- }
+ if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ VisitDeclStmt(DS);
+ return;
+ }
- if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
- VisitDeclStmt(DS);
- return;
- }
+ ConstStmtVisitor<ASTDumper>::Visit(S);
- setMoreChildren(!S->children().empty());
- ConstStmtVisitor<ASTDumper>::Visit(S);
- setMoreChildren(false);
- for (Stmt::const_child_range CI = S->children(); CI; ++CI) {
- Stmt::const_child_range Next = CI;
- ++Next;
- if (!Next)
- lastChild();
- dumpStmt(*CI);
- }
+ for (Stmt::const_child_range CI = S->children(); CI; ++CI)
+ dumpStmt(*CI);
+ });
}
void ASTDumper::VisitStmt(const Stmt *Node) {
@@ -1538,22 +1605,16 @@ void ASTDumper::VisitDeclStmt(const DeclStmt *Node) {
VisitStmt(Node);
for (DeclStmt::const_decl_iterator I = Node->decl_begin(),
E = Node->decl_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
+ I != E; ++I)
dumpDecl(*I);
- }
}
void ASTDumper::VisitAttributedStmt(const AttributedStmt *Node) {
VisitStmt(Node);
for (ArrayRef<const Attr *>::iterator I = Node->getAttrs().begin(),
E = Node->getAttrs().end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
+ I != E; ++I)
dumpAttr(*I);
- }
}
void ASTDumper::VisitLabelStmt(const LabelStmt *Node) {
@@ -1693,15 +1754,7 @@ void ASTDumper::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node) {
void ASTDumper::VisitPredefinedExpr(const PredefinedExpr *Node) {
VisitExpr(Node);
- switch (Node->getIdentType()) {
- default: llvm_unreachable("unknown case");
- case PredefinedExpr::Func: OS << " __func__"; break;
- case PredefinedExpr::Function: OS << " __FUNCTION__"; break;
- case PredefinedExpr::FuncDName: OS << " __FUNCDNAME__"; break;
- case PredefinedExpr::LFunction: OS << " L__FUNCTION__"; break;
- case PredefinedExpr::PrettyFunction: OS << " __PRETTY_FUNCTION__";break;
- case PredefinedExpr::FuncSig: OS << " __FUNCSIG__"; break;
- }
+ OS << " " << PredefinedExpr::getIdentTypeName(Node->getIdentType());
}
void ASTDumper::VisitCharacterLiteral(const CharacterLiteral *Node) {
@@ -1734,12 +1787,10 @@ void ASTDumper::VisitStringLiteral(const StringLiteral *Str) {
void ASTDumper::VisitInitListExpr(const InitListExpr *ILE) {
VisitExpr(ILE);
if (auto *Filler = ILE->getArrayFiller()) {
- if (!ILE->getNumInits())
- lastChild();
- IndentScope Indent(*this);
- OS << "array filler";
- lastChild();
- dumpStmt(Filler);
+ dumpChild([=] {
+ OS << "array filler";
+ dumpStmt(Filler);
+ });
}
if (auto *Field = ILE->getInitializedFieldInUnion()) {
OS << " field ";
@@ -1805,10 +1856,8 @@ void ASTDumper::VisitBlockExpr(const BlockExpr *Node) {
void ASTDumper::VisitOpaqueValueExpr(const OpaqueValueExpr *Node) {
VisitExpr(Node);
- if (Expr *Source = Node->getSourceExpr()) {
- lastChild();
+ if (Expr *Source = Node->getSourceExpr())
dumpStmt(Source);
- }
}
// GNU extensions.
@@ -2024,27 +2073,24 @@ void ASTDumper::dumpFullComment(const FullComment *C) {
}
void ASTDumper::dumpComment(const Comment *C) {
- IndentScope Indent(*this);
-
- if (!C) {
- ColorScope Color(*this, NullColor);
- OS << "<<<NULL>>>";
- return;
- }
+ dumpChild([=] {
+ if (!C) {
+ ColorScope Color(*this, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
- {
- ColorScope Color(*this, CommentColor);
- OS << C->getCommentKindName();
- }
- dumpPointer(C);
- dumpSourceRange(C->getSourceRange());
- ConstCommentVisitor<ASTDumper>::visit(C);
- for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
- dumpComment(*I);
- }
+ {
+ ColorScope Color(*this, CommentColor);
+ OS << C->getCommentKindName();
+ }
+ dumpPointer(C);
+ dumpSourceRange(C->getSourceRange());
+ ConstCommentVisitor<ASTDumper>::visit(C);
+ for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
+ I != E; ++I)
+ dumpComment(*I);
+ });
}
void ASTDumper::visitTextComment(const TextComment *C) {
@@ -2148,6 +2194,23 @@ void ASTDumper::visitVerbatimLineComment(const VerbatimLineComment *C) {
}
//===----------------------------------------------------------------------===//
+// Type method implementations
+//===----------------------------------------------------------------------===//
+
+void QualType::dump(const char *msg) const {
+ if (msg)
+ llvm::errs() << msg << ": ";
+ dump();
+}
+
+LLVM_DUMP_METHOD void QualType::dump() const {
+ ASTDumper Dumper(llvm::errs(), nullptr, nullptr);
+ Dumper.dumpTypeAsChild(*this);
+}
+
+LLVM_DUMP_METHOD void Type::dump() const { QualType(this, 0).dump(); }
+
+//===----------------------------------------------------------------------===//
// Decl method implementations
//===----------------------------------------------------------------------===//
@@ -2169,13 +2232,14 @@ LLVM_DUMP_METHOD void DeclContext::dumpLookups() const {
dumpLookups(llvm::errs());
}
-LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS) const {
+LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS,
+ bool DumpDecls) const {
const DeclContext *DC = this;
while (!DC->isTranslationUnit())
DC = DC->getParent();
ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &Ctx.getSourceManager());
- P.dumpLookups(this);
+ P.dumpLookups(this, DumpDecls);
}
//===----------------------------------------------------------------------===//
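Note on the ASTDumper hunks above: the removed IndentScope, lastChild() and setMoreChildren() bookkeeping is replaced by a dumpChild helper that takes a callable. The helper's own declaration sits outside this diff, so the fragment below is only a sketch of the call-site pattern visible in the added lines; the visitor name and the child accessor are placeholders, not part of the patch.

    // Hypothetical visitor, mirroring the pattern of the added lines above.
    // From the call sites, dumpChild appears to take over the indentation and
    // last-child bookkeeping that the deleted lines handled by hand.
    void ASTDumper::VisitSomeNode(const SomeNode *N) { // SomeNode: placeholder
      dumpChild([=] {
        OS << "SomeNode";             // label for this child entry
        dumpStmt(N->getChildExpr());  // getChildExpr: placeholder accessor
      });
    }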
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index b0e0b1dc9e00..2442e8ec25f1 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -151,6 +151,7 @@ namespace clang {
Decl *VisitObjCMethodDecl(ObjCMethodDecl *D);
Decl *VisitObjCCategoryDecl(ObjCCategoryDecl *D);
Decl *VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ Decl *VisitLinkageSpecDecl(LinkageSpecDecl *D);
Decl *VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
Decl *VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
Decl *VisitObjCImplementationDecl(ObjCImplementationDecl *D);
@@ -1622,15 +1623,14 @@ QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
ToEPI.HasTrailingReturn = FromEPI.HasTrailingReturn;
ToEPI.TypeQuals = FromEPI.TypeQuals;
ToEPI.RefQualifier = FromEPI.RefQualifier;
- ToEPI.NumExceptions = ExceptionTypes.size();
- ToEPI.Exceptions = ExceptionTypes.data();
- ToEPI.ConsumedParameters = FromEPI.ConsumedParameters;
- ToEPI.ExceptionSpecType = FromEPI.ExceptionSpecType;
- ToEPI.NoexceptExpr = Importer.Import(FromEPI.NoexceptExpr);
- ToEPI.ExceptionSpecDecl = cast_or_null<FunctionDecl>(
- Importer.Import(FromEPI.ExceptionSpecDecl));
- ToEPI.ExceptionSpecTemplate = cast_or_null<FunctionDecl>(
- Importer.Import(FromEPI.ExceptionSpecTemplate));
+ ToEPI.ExceptionSpec.Type = FromEPI.ExceptionSpec.Type;
+ ToEPI.ExceptionSpec.Exceptions = ExceptionTypes;
+ ToEPI.ExceptionSpec.NoexceptExpr =
+ Importer.Import(FromEPI.ExceptionSpec.NoexceptExpr);
+ ToEPI.ExceptionSpec.SourceDecl = cast_or_null<FunctionDecl>(
+ Importer.Import(FromEPI.ExceptionSpec.SourceDecl));
+ ToEPI.ExceptionSpec.SourceTemplate = cast_or_null<FunctionDecl>(
+ Importer.Import(FromEPI.ExceptionSpec.SourceTemplate));
return Importer.getToContext().getFunctionType(ToResultType, ArgTypes, ToEPI);
}
@@ -2093,10 +2093,11 @@ ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
}
case TemplateArgument::Declaration: {
- ValueDecl *FromD = From.getAsDecl();
- if (ValueDecl *To = cast_or_null<ValueDecl>(Importer.Import(FromD)))
- return TemplateArgument(To, From.isDeclForReferenceParam());
- return TemplateArgument();
+ ValueDecl *To = cast_or_null<ValueDecl>(Importer.Import(From.getAsDecl()));
+ QualType ToType = Importer.Import(From.getParamTypeForDecl());
+ if (!To || ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(To, ToType);
}
case TemplateArgument::NullPtr: {
@@ -2253,7 +2254,7 @@ Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
} else {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Namespace))
continue;
@@ -2316,7 +2317,7 @@ Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
@@ -2396,7 +2397,7 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
if (!DC->isFunctionOrMethod() && SearchName) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(SearchName, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
@@ -2482,7 +2483,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(SearchName, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
@@ -2604,7 +2605,7 @@ Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
@@ -2656,7 +2657,7 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
@@ -2712,8 +2713,9 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// FunctionDecl that we are importing the FunctionProtoType for.
// To avoid an infinite recursion when importing, create the FunctionDecl
// with a simplified function type and update it afterwards.
- if (FromEPI.ExceptionSpecDecl || FromEPI.ExceptionSpecTemplate ||
- FromEPI.NoexceptExpr) {
+ if (FromEPI.ExceptionSpec.SourceDecl ||
+ FromEPI.ExceptionSpec.SourceTemplate ||
+ FromEPI.ExceptionSpec.NoexceptExpr) {
FunctionProtoType::ExtProtoInfo DefaultEPI;
FromTy = Importer.getFromContext().getFunctionType(
FromFPT->getReturnType(), FromFPT->getParamTypes(), DefaultEPI);
@@ -2858,7 +2860,7 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
// Determine whether we've already imported this field.
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (FieldDecl *FoundField = dyn_cast<FieldDecl>(FoundDecls[I])) {
// For anonymous fields, match up by index.
@@ -2914,7 +2916,7 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
// Determine whether we've already imported this field.
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (IndirectFieldDecl *FoundField
= dyn_cast<IndirectFieldDecl>(FoundDecls[I])) {
@@ -2958,9 +2960,12 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
}
IndirectFieldDecl *ToIndirectField = IndirectFieldDecl::Create(
- Importer.getToContext(), DC,
- Loc, Name.getAsIdentifierInfo(), T,
- NamedChain, D->getChainingSize());
+ Importer.getToContext(), DC, Loc, Name.getAsIdentifierInfo(), T,
+ NamedChain, D->getChainingSize());
+
+ for (const auto *Attr : D->attrs())
+ ToIndirectField->addAttr(Attr->clone(Importer.getToContext()));
+
ToIndirectField->setAccess(D->getAccess());
ToIndirectField->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToIndirectField);
@@ -2978,7 +2983,7 @@ Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
// Determine whether we've already imported this ivar
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (ObjCIvarDecl *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecls[I])) {
if (Importer.IsStructurallyEquivalent(D->getType(),
@@ -3033,7 +3038,7 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
@@ -3203,7 +3208,7 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
return nullptr;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (ObjCMethodDecl *FoundMethod = dyn_cast<ObjCMethodDecl>(FoundDecls[I])) {
if (FoundMethod->isInstanceMethod() != D->isInstanceMethod())
@@ -3439,7 +3444,7 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
ObjCProtocolDecl *MergeWithProtocol = nullptr;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_ObjCProtocol))
continue;
@@ -3466,6 +3471,36 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
return ToProto;
}
+Decl *ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ DeclContext *LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+
+ SourceLocation ExternLoc = Importer.Import(D->getExternLoc());
+ SourceLocation LangLoc = Importer.Import(D->getLocation());
+
+ bool HasBraces = D->hasBraces();
+
+ LinkageSpecDecl *ToLinkageSpec =
+ LinkageSpecDecl::Create(Importer.getToContext(),
+ DC,
+ ExternLoc,
+ LangLoc,
+ D->getLanguage(),
+ HasBraces);
+
+ if (HasBraces) {
+ SourceLocation RBraceLoc = Importer.Import(D->getRBraceLoc());
+ ToLinkageSpec->setRBraceLoc(RBraceLoc);
+ }
+
+ ToLinkageSpec->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToLinkageSpec);
+
+ Importer.Imported(D, ToLinkageSpec);
+
+ return ToLinkageSpec;
+}
+
bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
ObjCInterfaceDecl *To,
ImportDefinitionKind Kind) {
@@ -3585,7 +3620,7 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
// Look for an existing interface with the same name.
ObjCInterfaceDecl *MergeWithIface = nullptr;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
@@ -3739,7 +3774,7 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
// Check whether we have already imported this property.
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (ObjCPropertyDecl *FoundProp
= dyn_cast<ObjCPropertyDecl>(FoundDecls[I])) {
@@ -3972,7 +4007,7 @@ Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
@@ -4161,7 +4196,7 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
"Variable templates cannot be declared at function scope");
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
@@ -4371,7 +4406,7 @@ Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
Importer.Import(E->getQualifierLoc()),
Importer.Import(E->getTemplateKeywordLoc()),
ToD,
- E->refersToEnclosingLocal(),
+ E->refersToEnclosingVariableOrCapture(),
Importer.Import(E->getLocation()),
T, E->getValueKind(),
FoundD,
@@ -4760,6 +4795,13 @@ NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
case NestedNameSpecifier::Global:
return NestedNameSpecifier::GlobalSpecifier(ToContext);
+ case NestedNameSpecifier::Super:
+ if (CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(Import(FromNNS->getAsRecordDecl()))) {
+ return NestedNameSpecifier::SuperSpecifier(ToContext, RD);
+ }
+ return nullptr;
+
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate: {
QualType T = Import(QualType(FromNNS->getAsType(), 0u));
@@ -4882,7 +4924,10 @@ SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
FromLoc = FromSM.getSpellingLoc(FromLoc);
std::pair<FileID, unsigned> Decomposed = FromSM.getDecomposedLoc(FromLoc);
SourceManager &ToSM = ToContext.getSourceManager();
- return ToSM.getLocForStartOfFile(Import(Decomposed.first))
+ FileID ToFileID = Import(Decomposed.first);
+ if (ToFileID.isInvalid())
+ return SourceLocation();
+ return ToSM.getLocForStartOfFile(ToFileID)
.getLocWithOffset(Decomposed.second);
}
@@ -4913,16 +4958,19 @@ FileID ASTImporter::Import(FileID FromID) {
// FIXME: We definitely want to re-use the existing MemoryBuffer, rather
// than mmap the files several times.
const FileEntry *Entry = ToFileManager.getFile(Cache->OrigEntry->getName());
+ if (!Entry)
+ return FileID();
ToID = ToSM.createFileID(Entry, ToIncludeLoc,
FromSLoc.getFile().getFileCharacteristic());
} else {
// FIXME: We want to re-use the existing MemoryBuffer!
const llvm::MemoryBuffer *
FromBuf = Cache->getBuffer(FromContext.getDiagnostics(), FromSM);
- llvm::MemoryBuffer *ToBuf
+ std::unique_ptr<llvm::MemoryBuffer> ToBuf
= llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
FromBuf->getBufferIdentifier());
- ToID = ToSM.createFileID(ToBuf, FromSLoc.getFile().getFileCharacteristic());
+ ToID = ToSM.createFileID(std::move(ToBuf),
+ FromSLoc.getFile().getFileCharacteristic());
}
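Note on the exception-specification change that runs through both the ASTDumper and ASTImporter hunks above: the flat ExtProtoInfo members (ExceptionSpecType, Exceptions/NumExceptions, NoexceptExpr, ExceptionSpecDecl, ExceptionSpecTemplate) are regrouped under a single nested ExceptionSpec member. A minimal before/after sketch using only names that appear in the +/- lines; everything else about ExtProtoInfo is outside this diff.

    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    // Old field access (removed lines):
    //   if (EPI.ExceptionSpecType == EST_Unevaluated)
    //     OS << " noexcept-unevaluated " << EPI.ExceptionSpecDecl;
    // New field access (added lines):
    if (EPI.ExceptionSpec.Type == EST_Unevaluated)
      OS << " noexcept-unevaluated " << EPI.ExceptionSpec.SourceDecl;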
diff --git a/lib/AST/ASTTypeTraits.cpp b/lib/AST/ASTTypeTraits.cpp
index baa8e48779a2..ec0671ceb1b5 100644
--- a/lib/AST/ASTTypeTraits.cpp
+++ b/lib/AST/ASTTypeTraits.cpp
@@ -62,6 +62,53 @@ bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived,
StringRef ASTNodeKind::asStringRef() const { return AllKindInfo[KindId].Name; }
+ASTNodeKind ASTNodeKind::getMostDerivedType(ASTNodeKind Kind1,
+ ASTNodeKind Kind2) {
+ if (Kind1.isBaseOf(Kind2)) return Kind2;
+ if (Kind2.isBaseOf(Kind1)) return Kind1;
+ return ASTNodeKind();
+}
+
+ASTNodeKind ASTNodeKind::getMostDerivedCommonAncestor(ASTNodeKind Kind1,
+ ASTNodeKind Kind2) {
+ NodeKindId Parent = Kind1.KindId;
+ while (!isBaseOf(Parent, Kind2.KindId, nullptr) && Parent != NKI_None) {
+ Parent = AllKindInfo[Parent].ParentId;
+ }
+ return ASTNodeKind(Parent);
+}
+
+ASTNodeKind ASTNodeKind::getFromNode(const Decl &D) {
+ switch (D.getKind()) {
+#define DECL(DERIVED, BASE) \
+ case Decl::DERIVED: return ASTNodeKind(NKI_##DERIVED##Decl);
+#define ABSTRACT_DECL(D)
+#include "clang/AST/DeclNodes.inc"
+ };
+ llvm_unreachable("invalid decl kind");
+}
+
+ASTNodeKind ASTNodeKind::getFromNode(const Stmt &S) {
+ switch (S.getStmtClass()) {
+ case Stmt::NoStmtClass: return NKI_None;
+#define STMT(CLASS, PARENT) \
+ case Stmt::CLASS##Class: return ASTNodeKind(NKI_##CLASS);
+#define ABSTRACT_STMT(S)
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("invalid stmt kind");
+}
+
+ASTNodeKind ASTNodeKind::getFromNode(const Type &T) {
+ switch (T.getTypeClass()) {
+#define TYPE(Class, Base) \
+ case Type::Class: return ASTNodeKind(NKI_##Class##Type);
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ }
+ llvm_unreachable("invalid type kind");
+}
+
void DynTypedNode::print(llvm::raw_ostream &OS,
const PrintingPolicy &PP) const {
if (const TemplateArgument *TA = get<TemplateArgument>())
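Note on the new ASTNodeKind helpers above: getFromNode maps a concrete Decl, Stmt or Type node to its kind value, and the two getMostDerived* helpers relate kinds to each other. A usage sketch; D1 and D2 are assumed to be references to arbitrary Decl subclasses and are not taken from the patch.

    ASTNodeKind K1 = ASTNodeKind::getFromNode(D1);  // dynamic kind of D1
    ASTNodeKind K2 = ASTNodeKind::getFromNode(D2);
    // Walks K1's parent chain until it reaches a base of K2, so for two Decl
    // kinds this yields their closest shared base kind.
    ASTNodeKind Common = ASTNodeKind::getMostDerivedCommonAncestor(K1, K2);
    // Picks whichever of the two kinds derives from the other; returns an
    // invalid ASTNodeKind when neither is a base of the other.
    ASTNodeKind Derived = ASTNodeKind::getMostDerivedType(K1, K2);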
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
index 9006be64f73f..6ce347b4a31c 100644
--- a/lib/AST/CMakeLists.txt
+++ b/lib/AST/CMakeLists.txt
@@ -35,7 +35,6 @@ add_clang_library(clangAST
ItaniumCXXABI.cpp
ItaniumMangle.cpp
Mangle.cpp
- MangleNumberingContext.cpp
MicrosoftCXXABI.cpp
MicrosoftMangle.cpp
NestedNameSpecifier.cpp
diff --git a/lib/AST/CXXABI.h b/lib/AST/CXXABI.h
index 12b929b88db0..8e9e358525e8 100644
--- a/lib/AST/CXXABI.h
+++ b/lib/AST/CXXABI.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_CXXABI_H
-#define LLVM_CLANG_AST_CXXABI_H
+#ifndef LLVM_CLANG_LIB_AST_CXXABI_H
+#define LLVM_CLANG_LIB_AST_CXXABI_H
#include "clang/AST/Type.h"
diff --git a/lib/AST/Comment.cpp b/lib/AST/Comment.cpp
index 4f433467f9bc..d05c5de543ff 100644
--- a/lib/AST/Comment.cpp
+++ b/lib/AST/Comment.cpp
@@ -157,8 +157,7 @@ void DeclInfo::fill() {
case Decl::CXXConversion: {
const FunctionDecl *FD = cast<FunctionDecl>(CommentDecl);
Kind = FunctionKind;
- ParamVars = ArrayRef<const ParmVarDecl *>(FD->param_begin(),
- FD->getNumParams());
+ ParamVars = llvm::makeArrayRef(FD->param_begin(), FD->getNumParams());
ReturnType = FD->getReturnType();
unsigned NumLists = FD->getNumTemplateParameterLists();
if (NumLists != 0) {
@@ -178,8 +177,7 @@ void DeclInfo::fill() {
case Decl::ObjCMethod: {
const ObjCMethodDecl *MD = cast<ObjCMethodDecl>(CommentDecl);
Kind = FunctionKind;
- ParamVars = ArrayRef<const ParmVarDecl *>(MD->param_begin(),
- MD->param_size());
+ ParamVars = llvm::makeArrayRef(MD->param_begin(), MD->param_size());
ReturnType = MD->getReturnType();
IsObjCMethod = true;
IsInstanceMethod = MD->isInstanceMethod();
@@ -191,8 +189,7 @@ void DeclInfo::fill() {
Kind = FunctionKind;
TemplateKind = Template;
const FunctionDecl *FD = FTD->getTemplatedDecl();
- ParamVars = ArrayRef<const ParmVarDecl *>(FD->param_begin(),
- FD->getNumParams());
+ ParamVars = llvm::makeArrayRef(FD->param_begin(), FD->getNumParams());
ReturnType = FD->getReturnType();
TemplateParameters = FTD->getTemplateParameters();
break;
@@ -278,9 +275,7 @@ void DeclInfo::fill() {
// Is this a typedef for a function type?
if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
Kind = FunctionKind;
- ArrayRef<ParmVarDecl *> Params = FTL.getParams();
- ParamVars = ArrayRef<const ParmVarDecl *>(Params.data(),
- Params.size());
+ ParamVars = FTL.getParams();
ReturnType = FTL.getReturnLoc().getType();
break;
}
@@ -299,9 +294,7 @@ void DeclInfo::fill() {
TypeLoc TL = MaybeFunctionTSI->getTypeLoc().getUnqualifiedLoc();
if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
Kind = FunctionKind;
- ArrayRef<ParmVarDecl *> Params = FTL.getParams();
- ParamVars = ArrayRef<const ParmVarDecl *>(Params.data(),
- Params.size());
+ ParamVars = FTL.getParams();
ReturnType = FTL.getReturnLoc().getType();
}
break;
diff --git a/lib/AST/CommentCommandTraits.cpp b/lib/AST/CommentCommandTraits.cpp
index a7b07a40c985..7378a7c3ac06 100644
--- a/lib/AST/CommentCommandTraits.cpp
+++ b/lib/AST/CommentCommandTraits.cpp
@@ -89,6 +89,10 @@ CommandInfo *CommandTraits::createCommandInfoWithName(StringRef CommandName) {
// Value-initialize (=zero-initialize in this case) a new CommandInfo.
CommandInfo *Info = new (Allocator) CommandInfo();
Info->Name = Name;
+ // We only have a limited number of bits to encode command IDs in the
+ // CommandInfo structure, so the ID numbers can potentially wrap around.
+ assert((NextID < (1 << CommandInfo::NumCommandIDBits))
+ && "Too many commands. We have limited bits for the command ID.");
Info->ID = NextID++;
RegisteredCommands.push_back(Info);
diff --git a/lib/AST/CommentLexer.cpp b/lib/AST/CommentLexer.cpp
index 792a8320449b..06a08bdad45f 100644
--- a/lib/AST/CommentLexer.cpp
+++ b/lib/AST/CommentLexer.cpp
@@ -362,7 +362,7 @@ void Lexer::lexCommentText(Token &T) {
}
}
- const StringRef CommandName(BufferPtr + 1, Length);
+ StringRef CommandName(BufferPtr + 1, Length);
const CommandInfo *Info = Traits.getCommandInfoOrNULL(CommandName);
if (!Info) {
@@ -531,7 +531,7 @@ void Lexer::lexVerbatimLineText(Token &T) {
// Extract current line.
const char *Newline = findNewline(BufferPtr, CommentEnd);
- const StringRef Text(BufferPtr, Newline - BufferPtr);
+ StringRef Text(BufferPtr, Newline - BufferPtr);
formTokenWithChars(T, Newline, tok::verbatim_line_text);
T.setVerbatimLineText(Text);
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 7448de2ffbc9..e43c28af9b69 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -29,6 +29,7 @@
#include "clang/Basic/Module.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
@@ -38,6 +39,11 @@ Decl *clang::getPrimaryMergedDecl(Decl *D) {
return D->getASTContext().getPrimaryMergedDecl(D);
}
+// Defined here so that it can be inlined into its direct callers.
+bool Decl::isOutOfLine() const {
+ return !getLexicalDeclContext()->Equals(getDeclContext());
+}
+
//===----------------------------------------------------------------------===//
// NamedDecl Implementation
//===----------------------------------------------------------------------===//
@@ -613,9 +619,12 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
// Explicitly declared static.
if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
return LinkageInfo(InternalLinkage, DefaultVisibility, false);
+ } else if (const auto *IFD = dyn_cast<IndirectFieldDecl>(D)) {
+ // - a data member of an anonymous union.
+ const VarDecl *VD = IFD->getVarDecl();
+ assert(VD && "Expected a VarDecl in this IndirectFieldDecl!");
+ return getLVForNamespaceScopeDecl(VD, computation);
}
- // - a data member of an anonymous union.
- assert(!isa<IndirectFieldDecl>(D) && "Didn't expect an IndirectFieldDecl!");
assert(!isa<FieldDecl>(D) && "Didn't expect a FieldDecl!");
if (D->isInAnonymousNamespace()) {
@@ -811,6 +820,8 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
// Everything not covered here has no linkage.
} else {
+ // FIXME: A typedef declaration has linkage if it gives a type a name for
+ // linkage purposes.
return LinkageInfo::none();
}
@@ -994,6 +1005,19 @@ bool NamedDecl::isLinkageValid() const {
getCachedLinkage();
}
+ObjCStringFormatFamily NamedDecl::getObjCFStringFormattingFamily() const {
+ StringRef name = getName();
+ if (name.empty()) return SFF_None;
+
+ if (name.front() == 'C')
+ if (name == "CFStringCreateWithFormat" ||
+ name == "CFStringCreateWithFormatAndArguments" ||
+ name == "CFStringAppendFormat" ||
+ name == "CFStringAppendFormatAndArguments")
+ return SFF_CFString;
+ return SFF_None;
+}
+
Linkage NamedDecl::getLinkageInternal() const {
// We don't care about visibility here, so ask for the cheapest
// possible visibility analysis.
@@ -1168,7 +1192,7 @@ static LinkageInfo getLVForLocalDecl(const NamedDecl *D,
} else {
const FunctionDecl *FD = cast<FunctionDecl>(OuterD);
if (!FD->isInlined() &&
- FD->getTemplateSpecializationKind() == TSK_Undeclared)
+ !isTemplateInstantiation(FD->getTemplateSpecializationKind()))
return LinkageInfo::none();
LV = getLVForDecl(FD, computation);
@@ -2607,7 +2631,7 @@ void FunctionDecl::setDeclsInPrototypeScope(ArrayRef<NamedDecl *> NewDecls) {
if (!NewDecls.empty()) {
NamedDecl **A = new (getASTContext()) NamedDecl*[NewDecls.size()];
std::copy(NewDecls.begin(), NewDecls.end(), A);
- DeclsInPrototypeScope = ArrayRef<NamedDecl *>(A, NewDecls.size());
+ DeclsInPrototypeScope = llvm::makeArrayRef(A, NewDecls.size());
// Move declarations introduced in prototype to the function context.
for (auto I : NewDecls) {
DeclContext *DC = I->getDeclContext();
@@ -3168,8 +3192,11 @@ unsigned FunctionDecl::getMemoryFunctionKind() const {
return Builtin::BImemmove;
case Builtin::BIstrlcpy:
+ case Builtin::BI__builtin___strlcpy_chk:
return Builtin::BIstrlcpy;
+
case Builtin::BIstrlcat:
+ case Builtin::BI__builtin___strlcat_chk:
return Builtin::BIstrlcat;
case Builtin::BI__builtin_memcmp:
@@ -3261,7 +3288,7 @@ bool FieldDecl::isAnonymousStructOrUnion() const {
unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const {
assert(isBitField() && "not a bitfield");
- Expr *BitWidth = InitializerOrBitWidth.getPointer();
+ Expr *BitWidth = static_cast<Expr *>(InitStorage.getPointer());
return BitWidth->EvaluateKnownConstInt(Ctx).getZExtValue();
}
@@ -3275,30 +3302,39 @@ unsigned FieldDecl::getFieldIndex() const {
unsigned Index = 0;
const RecordDecl *RD = getParent();
- for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
- I != E; ++I, ++Index)
- I->getCanonicalDecl()->CachedFieldIndex = Index + 1;
+ for (auto *Field : RD->fields()) {
+ Field->getCanonicalDecl()->CachedFieldIndex = Index + 1;
+ ++Index;
+ }
assert(CachedFieldIndex && "failed to find field in parent");
return CachedFieldIndex - 1;
}
SourceRange FieldDecl::getSourceRange() const {
- if (const Expr *E = InitializerOrBitWidth.getPointer())
- return SourceRange(getInnerLocStart(), E->getLocEnd());
- return DeclaratorDecl::getSourceRange();
-}
+ switch (InitStorage.getInt()) {
+ // All three of these cases store an optional Expr*.
+ case ISK_BitWidthOrNothing:
+ case ISK_InClassCopyInit:
+ case ISK_InClassListInit:
+ if (const Expr *E = static_cast<const Expr *>(InitStorage.getPointer()))
+ return SourceRange(getInnerLocStart(), E->getLocEnd());
+ // FALLTHROUGH
-void FieldDecl::setBitWidth(Expr *Width) {
- assert(!InitializerOrBitWidth.getPointer() && !hasInClassInitializer() &&
- "bit width or initializer already set");
- InitializerOrBitWidth.setPointer(Width);
+ case ISK_CapturedVLAType:
+ return DeclaratorDecl::getSourceRange();
+ }
+ llvm_unreachable("bad init storage kind");
}
-void FieldDecl::setInClassInitializer(Expr *Init) {
- assert(!InitializerOrBitWidth.getPointer() && hasInClassInitializer() &&
- "bit width or initializer already set");
- InitializerOrBitWidth.setPointer(Init);
+void FieldDecl::setCapturedVLAType(const VariableArrayType *VLAType) {
+ assert((getParent()->isLambda() || getParent()->isCapturedRecord()) &&
+ "capturing type in non-lambda or captured record.");
+ assert(InitStorage.getInt() == ISK_BitWidthOrNothing &&
+ InitStorage.getPointer() == nullptr &&
+ "bit width, initializer or captured type already set");
+ InitStorage.setPointerAndInt(const_cast<VariableArrayType *>(VLAType),
+ ISK_CapturedVLAType);
}
//===----------------------------------------------------------------------===//
@@ -3521,6 +3557,20 @@ bool RecordDecl::isInjectedClassName() const {
cast<RecordDecl>(getDeclContext())->getDeclName() == getDeclName();
}
+bool RecordDecl::isLambda() const {
+ if (auto RD = dyn_cast<CXXRecordDecl>(this))
+ return RD->isLambda();
+ return false;
+}
+
+bool RecordDecl::isCapturedRecord() const {
+ return hasAttr<CapturedRecordAttr>();
+}
+
+void RecordDecl::setCapturedRecord() {
+ addAttr(CapturedRecordAttr::CreateImplicit(getASTContext()));
+}
+
RecordDecl::field_iterator RecordDecl::field_begin() const {
if (hasExternalLexicalStorage() && !LoadedFieldsFromExternalStorage)
LoadFieldsFromExternalStorage();
@@ -3578,6 +3628,64 @@ void RecordDecl::LoadFieldsFromExternalStorage() const {
/*FieldsAlreadyLoaded=*/false);
}
+bool RecordDecl::mayInsertExtraPadding(bool EmitRemark) const {
+ ASTContext &Context = getASTContext();
+ if (!Context.getLangOpts().Sanitize.has(SanitizerKind::Address) ||
+ !Context.getLangOpts().SanitizeAddressFieldPadding)
+ return false;
+ const auto &Blacklist = Context.getSanitizerBlacklist();
+ const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(this);
+ // We may be able to relax some of these requirements.
+ int ReasonToReject = -1;
+ if (!CXXRD || CXXRD->isExternCContext())
+ ReasonToReject = 0; // is not C++.
+ else if (CXXRD->hasAttr<PackedAttr>())
+ ReasonToReject = 1; // is packed.
+ else if (CXXRD->isUnion())
+ ReasonToReject = 2; // is a union.
+ else if (CXXRD->isTriviallyCopyable())
+ ReasonToReject = 3; // is trivially copyable.
+ else if (CXXRD->hasTrivialDestructor())
+ ReasonToReject = 4; // has trivial destructor.
+ else if (CXXRD->isStandardLayout())
+ ReasonToReject = 5; // is standard layout.
+ else if (Blacklist.isBlacklistedLocation(getLocation(), "field-padding"))
+ ReasonToReject = 6; // is in a blacklisted file.
+ else if (Blacklist.isBlacklistedType(getQualifiedNameAsString(),
+ "field-padding"))
+ ReasonToReject = 7; // is blacklisted.
+
+ if (EmitRemark) {
+ if (ReasonToReject >= 0)
+ Context.getDiagnostics().Report(
+ getLocation(),
+ diag::remark_sanitize_address_insert_extra_padding_rejected)
+ << getQualifiedNameAsString() << ReasonToReject;
+ else
+ Context.getDiagnostics().Report(
+ getLocation(),
+ diag::remark_sanitize_address_insert_extra_padding_accepted)
+ << getQualifiedNameAsString();
+ }
+ return ReasonToReject < 0;
+}
+
+const FieldDecl *RecordDecl::findFirstNamedDataMember() const {
+ for (const auto *I : fields()) {
+ if (I->getIdentifier())
+ return I;
+
+ if (const RecordType *RT = I->getType()->getAs<RecordType>())
+ if (const FieldDecl *NamedDataMember =
+ RT->getDecl()->findFirstNamedDataMember())
+ return NamedDataMember;
+ }
+
+ // We didn't find a named data member.
+ return nullptr;
+}
+
+
//===----------------------------------------------------------------------===//
// BlockDecl Implementation
//===----------------------------------------------------------------------===//
@@ -3657,6 +3765,13 @@ LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
SourceLocation());
}
+void LabelDecl::setMSAsmLabel(StringRef Name) {
+ char *Buffer = new (getASTContext(), 1) char[Name.size() + 1];
+ memcpy(Buffer, Name.data(), Name.size());
+ Buffer[Name.size()] = '\0';
+ MSAsmName = Buffer;
+}
+
void ValueDecl::anchor() { }
bool ValueDecl::isWeak() const {
@@ -3892,8 +4007,8 @@ ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
const SourceLocation *StoredLocs
= reinterpret_cast<const SourceLocation *>(this + 1);
- return ArrayRef<SourceLocation>(StoredLocs,
- getNumModuleIdentifiers(getImportedModule()));
+ return llvm::makeArrayRef(StoredLocs,
+ getNumModuleIdentifiers(getImportedModule()));
}
SourceRange ImportDecl::getSourceRange() const {
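Note on RecordDecl::findFirstNamedDataMember added above: it returns the first field that has an identifier, descending into unnamed record-typed members (such as an anonymous struct or union member) when the outer field itself has no name, and nullptr when no named member exists. A usage sketch; RD is assumed to be some const RecordDecl* and is not from the patch.

    if (const FieldDecl *First = RD->findFirstNamedDataMember())
      llvm::errs() << "first named data member: " << First->getName() << "\n";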
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index 2b1506d191d1..a46787fb0e8f 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -374,8 +374,10 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
if (Message) {
Message->clear();
llvm::raw_string_ostream Out(*Message);
+ VersionTuple VTI(A->getIntroduced());
+ VTI.UseDotAsSeparator();
Out << "introduced in " << PrettyPlatformName << ' '
- << A->getIntroduced() << HintMessage;
+ << VTI << HintMessage;
}
return AR_NotYetIntroduced;
@@ -386,8 +388,10 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
if (Message) {
Message->clear();
llvm::raw_string_ostream Out(*Message);
+ VersionTuple VTO(A->getObsoleted());
+ VTO.UseDotAsSeparator();
Out << "obsoleted in " << PrettyPlatformName << ' '
- << A->getObsoleted() << HintMessage;
+ << VTO << HintMessage;
}
return AR_Unavailable;
@@ -398,8 +402,10 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
if (Message) {
Message->clear();
llvm::raw_string_ostream Out(*Message);
+ VersionTuple VTD(A->getDeprecated());
+ VTD.UseDotAsSeparator();
Out << "first deprecated in " << PrettyPlatformName << ' '
- << A->getDeprecated() << HintMessage;
+ << VTD << HintMessage;
}
return AR_Deprecated;
@@ -1296,6 +1302,11 @@ DeclContext::lookup(DeclarationName Name) {
if (PrimaryContext != this)
return PrimaryContext->lookup(Name);
+ // If this is a namespace, ensure that any later redeclarations of it have
+ // been loaded, since they may add names to the result of this lookup.
+ if (auto *ND = dyn_cast<NamespaceDecl>(this))
+ (void)ND->getMostRecentDecl();
+
if (hasExternalVisibleStorage()) {
if (NeedToReconcileExternalVisibleStorage)
reconcileExternalVisibleStorage();
@@ -1430,6 +1441,17 @@ DeclContext *DeclContext::getEnclosingNamespaceContext() {
return Ctx->getPrimaryContext();
}
+RecordDecl *DeclContext::getOuterLexicalRecordContext() {
+ // Loop until we find a non-record context.
+ RecordDecl *OutermostRD = nullptr;
+ DeclContext *DC = this;
+ while (DC->isRecord()) {
+ OutermostRD = cast<RecordDecl>(DC);
+ DC = DC->getLexicalParent();
+ }
+ return OutermostRD;
+}
+
bool DeclContext::InEnclosingNamespaceSetOf(const DeclContext *O) const {
// For non-file contexts, this is equivalent to Equals.
if (!isFileContext())
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index ed26c5262a7b..a6d9d411eef3 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -209,7 +209,7 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// Now go through all virtual bases of this base and add them.
for (const auto &VBase : BaseClassDecl->vbases()) {
// Add this base if it's not already in the list.
- if (SeenVBaseTypes.insert(C.getCanonicalType(VBase.getType()))) {
+ if (SeenVBaseTypes.insert(C.getCanonicalType(VBase.getType())).second) {
VBases.push_back(&VBase);
// C++11 [class.copy]p8:
@@ -225,7 +225,7 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
if (Base->isVirtual()) {
// Add this base if it's not already in the list.
- if (SeenVBaseTypes.insert(C.getCanonicalType(BaseType)))
+ if (SeenVBaseTypes.insert(C.getCanonicalType(BaseType)).second)
VBases.push_back(Base);
// C++0x [meta.unary.prop] is_empty:
@@ -677,17 +677,24 @@ void CXXRecordDecl::addedMember(Decl *D) {
//
// Automatic Reference Counting: the presence of a member of Objective-C pointer type
// that does not explicitly have no lifetime makes the class a non-POD.
- // However, we delay setting PlainOldData to false in this case so that
- // Sema has a chance to diagnostic causes where the same class will be
- // non-POD with Automatic Reference Counting but a POD without ARC.
- // In this case, the class will become a non-POD class when we complete
- // the definition.
ASTContext &Context = getASTContext();
QualType T = Context.getBaseElementType(Field->getType());
if (T->isObjCRetainableType() || T.isObjCGCStrong()) {
- if (!Context.getLangOpts().ObjCAutoRefCount ||
- T.getObjCLifetime() != Qualifiers::OCL_ExplicitNone)
+ if (!Context.getLangOpts().ObjCAutoRefCount) {
setHasObjectMember(true);
+ } else if (T.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
+ // Objective-C Automatic Reference Counting:
+ // If a class has a non-static data member of Objective-C pointer
+ // type (or array thereof), it is a non-POD type and its
+ // default constructor (if any), copy constructor, move constructor,
+ // copy assignment operator, move assignment operator, and destructor are
+ // non-trivial.
+ setHasObjectMember(true);
+ struct DefinitionData &Data = data();
+ Data.PlainOldData = false;
+ Data.HasTrivialSpecialMembers = 0;
+ Data.HasIrrelevantDestructor = false;
+ }
} else if (!T.isCXX98PODType(Context))
data().PlainOldData = false;
@@ -720,7 +727,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
// brace-or-equal-initializers for non-static data members.
//
// This rule was removed in C++1y.
- if (!getASTContext().getLangOpts().CPlusPlus1y)
+ if (!getASTContext().getLangOpts().CPlusPlus14)
data().Aggregate = false;
// C++11 [class]p10:
@@ -1254,6 +1261,44 @@ CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
llvm_unreachable("Not a class template or member class specialization");
}
+const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const {
+ // If it's a class template specialization, find the template or partial
+ // specialization from which it was instantiated.
+ if (auto *TD = dyn_cast<ClassTemplateSpecializationDecl>(this)) {
+ auto From = TD->getInstantiatedFrom();
+ if (auto *CTD = From.dyn_cast<ClassTemplateDecl *>()) {
+ while (auto *NewCTD = CTD->getInstantiatedFromMemberTemplate()) {
+ if (NewCTD->isMemberSpecialization())
+ break;
+ CTD = NewCTD;
+ }
+ return CTD->getTemplatedDecl();
+ }
+ if (auto *CTPSD =
+ From.dyn_cast<ClassTemplatePartialSpecializationDecl *>()) {
+ while (auto *NewCTPSD = CTPSD->getInstantiatedFromMember()) {
+ if (NewCTPSD->isMemberSpecialization())
+ break;
+ CTPSD = NewCTPSD;
+ }
+ return CTPSD;
+ }
+ }
+
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo()) {
+ if (isTemplateInstantiation(MSInfo->getTemplateSpecializationKind())) {
+ const CXXRecordDecl *RD = this;
+ while (auto *NewRD = RD->getInstantiatedFromMemberClass())
+ RD = NewRD;
+ return RD;
+ }
+ }
+
+ assert(!isTemplateInstantiation(this->getTemplateSpecializationKind()) &&
+ "couldn't find pattern for class template instantiation");
+ return nullptr;
+}
+
CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
ASTContext &Context = getASTContext();
QualType ClassType = Context.getTypeDeclType(this);
@@ -1277,19 +1322,6 @@ void CXXRecordDecl::completeDefinition() {
void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
RecordDecl::completeDefinition();
- if (hasObjectMember() && getASTContext().getLangOpts().ObjCAutoRefCount) {
- // Objective-C Automatic Reference Counting:
- // If a class has a non-static data member of Objective-C pointer
- // type (or array thereof), it is a non-POD type and its
- // default constructor (if any), copy constructor, move constructor,
- // copy assignment operator, move assignment operator, and destructor are
- // non-trivial.
- struct DefinitionData &Data = data();
- Data.PlainOldData = false;
- Data.HasTrivialSpecialMembers = 0;
- Data.HasIrrelevantDestructor = false;
- }
-
// If the class may be abstract (but hasn't been marked as such), check for
// any pure final overriders.
if (mayBeAbstract()) {
@@ -1799,7 +1831,6 @@ bool CXXConstructorDecl::isConvertingConstructor(bool AllowExplicit) const {
bool CXXConstructorDecl::isSpecializationCopyingObject() const {
if ((getNumParams() < 1) ||
(getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
- (getPrimaryTemplate() == nullptr) ||
(getDescribedFunctionTemplate() != nullptr))
return false;
@@ -1970,6 +2001,16 @@ NamespaceDecl *NamespaceDecl::getMostRecentDeclImpl() {
void NamespaceAliasDecl::anchor() { }
+NamespaceAliasDecl *NamespaceAliasDecl::getNextRedeclarationImpl() {
+ return getNextRedeclaration();
+}
+NamespaceAliasDecl *NamespaceAliasDecl::getPreviousDeclImpl() {
+ return getPreviousDecl();
+}
+NamespaceAliasDecl *NamespaceAliasDecl::getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+}
+
NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation UsingLoc,
SourceLocation AliasLoc,
@@ -1977,15 +2018,16 @@ NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
NestedNameSpecifierLoc QualifierLoc,
SourceLocation IdentLoc,
NamedDecl *Namespace) {
+ // FIXME: Preserve the aliased namespace as written.
if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Namespace))
Namespace = NS->getOriginalNamespace();
- return new (C, DC) NamespaceAliasDecl(DC, UsingLoc, AliasLoc, Alias,
+ return new (C, DC) NamespaceAliasDecl(C, DC, UsingLoc, AliasLoc, Alias,
QualifierLoc, IdentLoc, Namespace);
}
NamespaceAliasDecl *
NamespaceAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- return new (C, ID) NamespaceAliasDecl(nullptr, SourceLocation(),
+ return new (C, ID) NamespaceAliasDecl(C, nullptr, SourceLocation(),
SourceLocation(), nullptr,
NestedNameSpecifierLoc(),
SourceLocation(), nullptr);
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index 2204dff13739..ed5367514c30 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -93,13 +93,13 @@ ObjCContainerDecl::getMethod(Selector Sel, bool isInstance,
return nullptr;
}
-/// HasUserDeclaredSetterMethod - This routine returns 'true' if a user declared setter
-/// method was found in the class, its protocols, its super classes or categories.
-/// It also returns 'true' if one of its categories has declared a 'readwrite' property.
-/// This is because, user must provide a setter method for the category's 'readwrite'
-/// property.
-bool
-ObjCContainerDecl::HasUserDeclaredSetterMethod(const ObjCPropertyDecl *Property) const {
+/// \brief This routine returns 'true' if a user declared setter method was
+/// found in the class, its protocols, its super classes or categories.
+/// It also returns 'true' if one of its categories has declared a 'readwrite'
+/// property. This is because the user must provide a setter method for the
+/// category's 'readwrite' property.
+bool ObjCContainerDecl::HasUserDeclaredSetterMethod(
+ const ObjCPropertyDecl *Property) const {
Selector Sel = Property->getSetterName();
lookup_const_result R = lookup(Sel);
for (lookup_const_iterator Meth = R.begin(), MethEnd = R.end();
@@ -118,9 +118,10 @@ ObjCContainerDecl::HasUserDeclaredSetterMethod(const ObjCPropertyDecl *Property)
return true;
if (Cat->IsClassExtension())
continue;
- // Also search through the categories looking for a 'readwrite' declaration
- // of this property. If one found, presumably a setter will be provided
- // (properties declared in categories will not get auto-synthesized).
+ // Also search through the categories looking for a 'readwrite'
+    // declaration of this property. If one is found, presumably a setter will
+ // be provided (properties declared in categories will not get
+ // auto-synthesized).
for (const auto *P : Cat->properties())
if (P->getIdentifier() == Property->getIdentifier()) {
if (P->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite)
@@ -151,7 +152,7 @@ ObjCContainerDecl::HasUserDeclaredSetterMethod(const ObjCPropertyDecl *Property)
ObjCPropertyDecl *
ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
- IdentifierInfo *propertyID) {
+ const IdentifierInfo *propertyID) {
// If this context is a hidden protocol definition, don't find any
// property.
if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(DC)) {
@@ -181,8 +182,8 @@ ObjCPropertyDecl::getDefaultSynthIvarName(ASTContext &Ctx) const {
/// FindPropertyDeclaration - Finds declaration of the property given its name
/// in 'PropertyId' and returns it. It returns 0, if not found.
-ObjCPropertyDecl *
-ObjCContainerDecl::FindPropertyDeclaration(IdentifierInfo *PropertyId) const {
+ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
+ const IdentifierInfo *PropertyId) const {
// Don't find properties within hidden protocol definitions.
if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
@@ -558,36 +559,39 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
LoadExternalDefinition();
while (ClassDecl) {
+ // 1. Look through primary class.
if ((MethodDecl = ClassDecl->getMethod(Sel, isInstance)))
return MethodDecl;
-
- // Didn't find one yet - look through protocols.
- for (const auto *I : ClassDecl->protocols())
- if ((MethodDecl = I->lookupMethod(Sel, isInstance)))
- return MethodDecl;
- // Didn't find one yet - now look through categories.
- for (const auto *Cat : ClassDecl->visible_categories()) {
+ // 2. Didn't find one yet - now look through categories.
+ for (const auto *Cat : ClassDecl->visible_categories())
if ((MethodDecl = Cat->getMethod(Sel, isInstance)))
if (C != Cat || !MethodDecl->isImplicit())
return MethodDecl;
- if (!shallowCategoryLookup) {
+ // 3. Didn't find one yet - look through primary class's protocols.
+ for (const auto *I : ClassDecl->protocols())
+ if ((MethodDecl = I->lookupMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ // 4. Didn't find one yet - now look through categories' protocols
+ if (!shallowCategoryLookup)
+ for (const auto *Cat : ClassDecl->visible_categories()) {
// Didn't find one yet - look through protocols.
const ObjCList<ObjCProtocolDecl> &Protocols =
- Cat->getReferencedProtocols();
+ Cat->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end(); I != E; ++I)
if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
if (C != Cat || !MethodDecl->isImplicit())
return MethodDecl;
}
- }
-
+
+
if (!followSuper)
return nullptr;
- // Get the super class (if any).
+ // 5. Get to the super class (if any).
ClassDecl = ClassDecl->getSuperClass();
}
return nullptr;
@@ -849,6 +853,11 @@ ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
family = OMF_None;
break;
+ case OMF_initialize:
+ if (isInstanceMethod() || !getReturnType()->isVoidType())
+ family = OMF_None;
+ break;
+
case OMF_performSelector:
if (!isInstanceMethod() || !getReturnType()->isObjCIdType())
family = OMF_None;
@@ -952,6 +961,13 @@ ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() {
llvm_unreachable("unknown method context");
}
+SourceRange ObjCMethodDecl::getReturnTypeSourceRange() const {
+ const auto *TSI = getReturnTypeSourceInfo();
+ if (TSI)
+ return TSI->getTypeLoc().getSourceRange();
+ return SourceRange();
+}
+
static void CollectOverriddenMethodsRecurse(const ObjCContainerDecl *Container,
const ObjCMethodDecl *Method,
SmallVectorImpl<const ObjCMethodDecl *> &Methods,
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index e5e5130f695a..c0f3e17693dc 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -87,6 +87,7 @@ namespace {
void PrintTemplateParameters(const TemplateParameterList *Params,
const TemplateArgumentList *Args = nullptr);
void prettyPrintAttributes(Decl *D);
+ void printDeclType(QualType T, StringRef DeclName, bool Pack = false);
};
}
@@ -197,6 +198,17 @@ void DeclPrinter::prettyPrintAttributes(Decl *D) {
}
}
+void DeclPrinter::printDeclType(QualType T, StringRef DeclName, bool Pack) {
+ // Normally, a PackExpansionType is written as T[3]... (for instance, as a
+ // template argument), but if it is the type of a declaration, the ellipsis
+ // is placed before the name being declared.
+ if (auto *PET = T->getAs<PackExpansionType>()) {
+ Pack = true;
+ T = PET->getPattern();
+ }
+ T.print(Out, Policy, (Pack ? "..." : "") + DeclName);
+}
+
void DeclPrinter::ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls) {
this->Indent();
Decl::printGroup(Decls.data(), Decls.size(), Out, Policy, Indentation);
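
For context, a small illustration of the two spellings the new printDeclType helper distinguishes (a sketch; exact printer output may differ slightly):

    template <typename... Ts>
    void f(Ts... args);   // as a declaration, the ellipsis precedes the declared
                          // name, printed roughly as "Ts ...args"
    // whereas the same pack expansion written as a type argument, e.g.
    // std::tuple<Ts...>, keeps the ellipsis after the pattern.
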
@@ -365,6 +377,9 @@ void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
if (!Policy.SuppressSpecifiers && D->isModulePrivate())
Out << "__module_private__ ";
Out << D->getKindName();
+
+ prettyPrintAttributes(D);
+
if (D->getIdentifier())
Out << ' ' << *D;
@@ -647,7 +662,6 @@ void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
Out << *D << ":";
}
-
void DeclPrinter::VisitVarDecl(VarDecl *D) {
if (!Policy.SuppressSpecifiers) {
StorageClass SC = D->getStorageClass();
@@ -675,7 +689,7 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
QualType T = D->getTypeSourceInfo()
? D->getTypeSourceInfo()->getType()
: D->getASTContext().getUnqualifiedObjCPointerType(D->getType());
- T.print(Out, Policy, D->getName());
+ printDeclType(T, D->getName());
Expr *Init = D->getInit();
if (!Policy.SuppressInitializers && Init) {
bool ImplicitInit = false;
@@ -757,6 +771,9 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
if (!Policy.SuppressSpecifiers && D->isModulePrivate())
Out << "__module_private__ ";
Out << D->getKindName();
+
+ prettyPrintAttributes(D);
+
if (D->getIdentifier())
Out << ' ' << *D;
@@ -773,9 +790,11 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
Out << "virtual ";
AccessSpecifier AS = Base->getAccessSpecifierAsWritten();
- if (AS != AS_none)
+ if (AS != AS_none) {
Print(AS);
- Out << " " << Base->getType().getAsString(Policy);
+ Out << " ";
+ }
+ Out << Base->getType().getAsString(Policy);
if (Base->isPackExpansion())
Out << "...";
@@ -830,7 +849,7 @@ void DeclPrinter::PrintTemplateParameters(const TemplateParameterList *Params,
Out << "class ";
if (TTP->isParameterPack())
- Out << "... ";
+ Out << "...";
Out << *TTP;
@@ -843,15 +862,10 @@ void DeclPrinter::PrintTemplateParameters(const TemplateParameterList *Params,
};
} else if (const NonTypeTemplateParmDecl *NTTP =
dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- Out << NTTP->getType().getAsString(Policy);
-
- if (NTTP->isParameterPack() && !isa<PackExpansionType>(NTTP->getType()))
- Out << "...";
-
- if (IdentifierInfo *Name = NTTP->getIdentifier()) {
- Out << ' ';
- Out << Name->getName();
- }
+ StringRef Name;
+ if (IdentifierInfo *II = NTTP->getIdentifier())
+ Name = II->getName();
+ printDeclType(NTTP->getType(), Name, NTTP->isParameterPack());
if (Args) {
Out << " = ";
@@ -940,11 +954,12 @@ void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) {
if (OMD->isVariadic())
Out << ", ...";
+
+ prettyPrintAttributes(OMD);
if (OMD->getBody() && !Policy.TerseOutput) {
Out << ' ';
OMD->getBody()->printPretty(Out, nullptr, Policy);
- Out << '\n';
}
else if (Policy.PolishForDeclaration)
Out << ';';
@@ -954,6 +969,7 @@ void DeclPrinter::VisitObjCImplementationDecl(ObjCImplementationDecl *OID) {
std::string I = OID->getNameAsString();
ObjCInterfaceDecl *SID = OID->getSuperClass();
+ bool eolnOut = false;
if (SID)
Out << "@implementation " << I << " : " << *SID;
else
@@ -961,6 +977,7 @@ void DeclPrinter::VisitObjCImplementationDecl(ObjCImplementationDecl *OID) {
if (OID->ivar_size() > 0) {
Out << "{\n";
+ eolnOut = true;
Indentation += Policy.Indentation;
for (const auto *I : OID->ivars()) {
Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()).
@@ -969,7 +986,13 @@ void DeclPrinter::VisitObjCImplementationDecl(ObjCImplementationDecl *OID) {
Indentation -= Policy.Indentation;
Out << "}\n";
}
+ else if (SID || (OID->decls_begin() != OID->decls_end())) {
+ Out << "\n";
+ eolnOut = true;
+ }
VisitDeclContext(OID, false);
+ if (!eolnOut)
+ Out << "\n";
Out << "@end";
}
@@ -1008,14 +1031,14 @@ void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
Indentation -= Policy.Indentation;
Out << "}\n";
}
- else if (SID) {
+ else if (SID || (OID->decls_begin() != OID->decls_end())) {
Out << "\n";
eolnOut = true;
}
VisitDeclContext(OID, false);
if (!eolnOut)
- Out << ' ';
+ Out << "\n";
Out << "@end";
// FIXME: implement the rest...
}
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 5f559b7e5ce3..712de5056e8d 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -195,12 +195,12 @@ SourceLocation Expr::getExprLoc() const {
case Stmt::NoStmtClass: llvm_unreachable("statement without class");
#define ABSTRACT_STMT(type)
#define STMT(type, base) \
- case Stmt::type##Class: llvm_unreachable(#type " is not an Expr"); break;
+ case Stmt::type##Class: break;
#define EXPR(type, base) \
case Stmt::type##Class: return getExprLocImpl<type>(this, &type::getExprLoc);
#include "clang/AST/StmtNodes.inc"
}
- llvm_unreachable("unknown statement kind");
+ llvm_unreachable("unknown expression kind");
}
//===----------------------------------------------------------------------===//
@@ -221,11 +221,11 @@ static void computeDeclRefDependence(const ASTContext &Ctx, NamedDecl *D,
// (TD) C++ [temp.dep.expr]p3:
// An id-expression is type-dependent if it contains:
//
- // and
+ // and
//
// (VD) C++ [temp.dep.constexpr]p2:
// An identifier is value-dependent if it is:
-
+
// (TD) - an identifier that was declared with dependent type
// (VD) - a name declared with a dependent type,
if (T->isDependentType()) {
@@ -309,29 +309,11 @@ void DeclRefExpr::computeDependence(const ASTContext &Ctx) {
bool InstantiationDependent = false;
computeDeclRefDependence(Ctx, getDecl(), getType(), TypeDependent,
ValueDependent, InstantiationDependent);
-
- // (TD) C++ [temp.dep.expr]p3:
- // An id-expression is type-dependent if it contains:
- //
- // and
- //
- // (VD) C++ [temp.dep.constexpr]p2:
- // An identifier is value-dependent if it is:
- if (!TypeDependent && !ValueDependent &&
- hasExplicitTemplateArgs() &&
- TemplateSpecializationType::anyDependentTemplateArguments(
- getTemplateArgs(),
- getNumTemplateArgs(),
- InstantiationDependent)) {
- TypeDependent = true;
- ValueDependent = true;
- InstantiationDependent = true;
- }
-
- ExprBits.TypeDependent = TypeDependent;
- ExprBits.ValueDependent = ValueDependent;
- ExprBits.InstantiationDependent = InstantiationDependent;
-
+
+ ExprBits.TypeDependent |= TypeDependent;
+ ExprBits.ValueDependent |= ValueDependent;
+ ExprBits.InstantiationDependent |= InstantiationDependent;
+
// Is the declaration a parameter pack?
if (getDecl()->isParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -340,7 +322,7 @@ void DeclRefExpr::computeDependence(const ASTContext &Ctx) {
DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateKWLoc,
- ValueDecl *D, bool RefersToEnclosingLocal,
+ ValueDecl *D, bool RefersToEnclosingVariableOrCapture,
const DeclarationNameInfo &NameInfo,
NamedDecl *FoundD,
const TemplateArgumentListInfo *TemplateArgs,
@@ -348,14 +330,21 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
: Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
D(D), Loc(NameInfo.getLoc()), DNLoc(NameInfo.getInfo()) {
DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0;
- if (QualifierLoc)
+ if (QualifierLoc) {
getInternalQualifierLoc() = QualifierLoc;
+ auto *NNS = QualifierLoc.getNestedNameSpecifier();
+ if (NNS->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (NNS->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ }
DeclRefExprBits.HasFoundDecl = FoundD ? 1 : 0;
if (FoundD)
getInternalFoundDecl() = FoundD;
DeclRefExprBits.HasTemplateKWAndArgsInfo
= (TemplateArgs || TemplateKWLoc.isValid()) ? 1 : 0;
- DeclRefExprBits.RefersToEnclosingLocal = RefersToEnclosingLocal;
+ DeclRefExprBits.RefersToEnclosingVariableOrCapture =
+ RefersToEnclosingVariableOrCapture;
if (TemplateArgs) {
bool Dependent = false;
bool InstantiationDependent = false;
@@ -364,8 +353,9 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
Dependent,
InstantiationDependent,
ContainsUnexpandedParameterPack);
- if (InstantiationDependent)
- setInstantiationDependent(true);
+ assert(!Dependent && "built a DeclRefExpr with dependent template args");
+ ExprBits.InstantiationDependent |= InstantiationDependent;
+ ExprBits.ContainsUnexpandedParameterPack |= ContainsUnexpandedParameterPack;
} else if (TemplateKWLoc.isValid()) {
getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
}
@@ -378,14 +368,14 @@ DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateKWLoc,
ValueDecl *D,
- bool RefersToEnclosingLocal,
+ bool RefersToEnclosingVariableOrCapture,
SourceLocation NameLoc,
QualType T,
ExprValueKind VK,
NamedDecl *FoundD,
const TemplateArgumentListInfo *TemplateArgs) {
return Create(Context, QualifierLoc, TemplateKWLoc, D,
- RefersToEnclosingLocal,
+ RefersToEnclosingVariableOrCapture,
DeclarationNameInfo(D->getDeclName(), NameLoc),
T, VK, FoundD, TemplateArgs);
}
@@ -394,7 +384,7 @@ DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateKWLoc,
ValueDecl *D,
- bool RefersToEnclosingLocal,
+ bool RefersToEnclosingVariableOrCapture,
const DeclarationNameInfo &NameInfo,
QualType T,
ExprValueKind VK,
@@ -416,7 +406,7 @@ DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
return new (Mem) DeclRefExpr(Context, QualifierLoc, TemplateKWLoc, D,
- RefersToEnclosingLocal,
+ RefersToEnclosingVariableOrCapture,
NameInfo, FoundD, TemplateArgs, T, VK);
}
@@ -448,6 +438,38 @@ SourceLocation DeclRefExpr::getLocEnd() const {
return getNameInfo().getLocEnd();
}
+PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentType IT,
+ StringLiteral *SL)
+ : Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary,
+ FNTy->isDependentType(), FNTy->isDependentType(),
+ FNTy->isInstantiationDependentType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ Loc(L), Type(IT), FnName(SL) {}
+
+StringLiteral *PredefinedExpr::getFunctionName() {
+ return cast_or_null<StringLiteral>(FnName);
+}
+
+StringRef PredefinedExpr::getIdentTypeName(PredefinedExpr::IdentType IT) {
+ switch (IT) {
+ case Func:
+ return "__func__";
+ case Function:
+ return "__FUNCTION__";
+ case FuncDName:
+ return "__FUNCDNAME__";
+ case LFunction:
+ return "L__FUNCTION__";
+ case PrettyFunction:
+ return "__PRETTY_FUNCTION__";
+ case FuncSig:
+ return "__FUNCSIG__";
+ case PrettyFunctionNoVirtual:
+ break;
+ }
+ llvm_unreachable("Unknown ident type for PredefinedExpr");
+}
+
// FIXME: Maybe this should use DeclPrinter with a special "print predefined
// expr" policy instead.
std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
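
For reference, what the enumerators handled above spell in source code (a usage sketch; the exact __PRETTY_FUNCTION__ text is compiler-defined):

    #include <cstdio>

    namespace demo {
    struct S {
      void run() {
        std::printf("%s\n", __func__);             // "run"
        std::printf("%s\n", __FUNCTION__);         // "run" (GNU/MS spelling)
        std::printf("%s\n", __PRETTY_FUNCTION__);  // e.g. "void demo::S::run()"
      }
    };
    }
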
@@ -477,6 +499,22 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
}
return "";
}
+ if (auto *BD = dyn_cast<BlockDecl>(CurrentDecl)) {
+ std::unique_ptr<MangleContext> MC;
+ MC.reset(Context.createMangleContext());
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ auto DC = CurrentDecl->getDeclContext();
+ if (DC->isFileContext())
+ MC->mangleGlobalBlock(BD, /*ID*/ nullptr, Out);
+ else if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
+ MC->mangleCtorBlock(CD, /*CT*/ Ctor_Complete, BD, Out);
+ else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
+ MC->mangleDtorBlock(DD, /*DT*/ Dtor_Complete, BD, Out);
+ else
+ MC->mangleBlock(DC, BD, Out);
+ return Out.str();
+ }
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
if (IT != PrettyFunction && IT != PrettyFunctionNoVirtual && IT != FuncSig)
return FD->getNameAsString();
@@ -509,6 +547,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
case CC_X86StdCall: POut << "__stdcall "; break;
case CC_X86FastCall: POut << "__fastcall "; break;
case CC_X86ThisCall: POut << "__thiscall "; break;
+ case CC_X86VectorCall: POut << "__vectorcall "; break;
// Only bother printing the conventions that MSVC knows about.
default: break;
}
@@ -600,9 +639,8 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
// type deduction and lambdas. For trailing return types resolve the
// decltype expression. Otherwise print the real type when this is
// not a constructor or destructor.
- if ((isa<CXXMethodDecl>(FD) &&
- cast<CXXMethodDecl>(FD)->getParent()->isLambda()) ||
- (FT && FT->getReturnType()->getAs<AutoType>()))
+ if (isa<CXXMethodDecl>(FD) &&
+ cast<CXXMethodDecl>(FD)->getParent()->isLambda())
Proto = "auto " + Proto;
else if (FT && FT->getReturnType()->getAs<DecltypeType>())
FT->getReturnType()
@@ -1252,7 +1290,7 @@ SourceLocation CallExpr::getLocStart() const {
return cast<CXXOperatorCallExpr>(this)->getLocStart();
SourceLocation begin = getCallee()->getLocStart();
- if (begin.isInvalid() && getNumArgs() > 0)
+ if (begin.isInvalid() && getNumArgs() > 0 && getArg(0))
begin = getArg(0)->getLocStart();
return begin;
}
@@ -1261,7 +1299,7 @@ SourceLocation CallExpr::getLocEnd() const {
return cast<CXXOperatorCallExpr>(this)->getLocEnd();
SourceLocation end = getRParenLoc();
- if (end.isInvalid() && getNumArgs() > 0)
+ if (end.isInvalid() && getNumArgs() > 0 && getArg(getNumArgs() - 1))
end = getArg(getNumArgs() - 1)->getLocEnd();
return end;
}
@@ -2734,10 +2772,9 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
if (ILE->getType()->isRecordType()) {
unsigned ElementNo = 0;
RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
- for (RecordDecl::field_iterator Field = RD->field_begin(),
- FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) {
+ for (const auto *Field : RD->fields()) {
// If this is a union, skip all the fields that aren't being initialized.
- if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
+ if (RD->isUnion() && ILE->getInitializedFieldInUnion() != Field)
continue;
// Don't emit anonymous bitfields, they just affect layout.
@@ -2830,9 +2867,16 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
return false;
}
-bool Expr::HasSideEffects(const ASTContext &Ctx) const {
+bool Expr::HasSideEffects(const ASTContext &Ctx,
+ bool IncludePossibleEffects) const {
+ // In circumstances where we care about definite side effects instead of
+ // potential side effects, we want to ignore expressions that are part of a
+ // macro expansion as a potential side effect.
+ if (!IncludePossibleEffects && getExprLoc().isMacroID())
+ return false;
+
if (isInstantiationDependent())
- return true;
+ return IncludePossibleEffects;
switch (getStmtClass()) {
case NoStmtClass:
@@ -2850,6 +2894,8 @@ bool Expr::HasSideEffects(const ASTContext &Ctx) const {
case PackExpansionExprClass:
case SubstNonTypeTemplateParmPackExprClass:
case FunctionParmPackExprClass:
+ case TypoExprClass:
+ case CXXFoldExprClass:
llvm_unreachable("shouldn't see dependent / unresolved nodes here");
case DeclRefExprClass:
@@ -2883,21 +2929,27 @@ bool Expr::HasSideEffects(const ASTContext &Ctx) const {
return false;
case CallExprClass:
+ case CXXOperatorCallExprClass:
+ case CXXMemberCallExprClass:
+ case CUDAKernelCallExprClass:
+ case BlockExprClass:
+ case CXXBindTemporaryExprClass:
+ case UserDefinedLiteralClass:
+ // We don't know a call definitely has side effects, but we can check the
+ // call's operands.
+ if (!IncludePossibleEffects)
+ break;
+ return true;
+
case MSPropertyRefExprClass:
case CompoundAssignOperatorClass:
case VAArgExprClass:
case AtomicExprClass:
case StmtExprClass:
- case CXXOperatorCallExprClass:
- case CXXMemberCallExprClass:
- case UserDefinedLiteralClass:
case CXXThrowExprClass:
case CXXNewExprClass:
case CXXDeleteExprClass:
case ExprWithCleanupsClass:
- case CXXBindTemporaryExprClass:
- case BlockExprClass:
- case CUDAKernelCallExprClass:
// These always have a side-effect.
return true;
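
A sketch of how a caller uses the new IncludePossibleEffects parameter; the wrapper below is hypothetical, only Expr::HasSideEffects itself comes from this change:

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Expr.h"

    static bool definitelyHasSideEffects(const clang::Expr *E,
                                         const clang::ASTContext &Ctx) {
      // With IncludePossibleEffects=false, constructs that only *might* have
      // effects (calls to arbitrary functions, ObjC message sends, volatile
      // reads reached through casts, macro-expanded subexpressions) no longer
      // count; only definite side effects such as assignments or ++/-- do.
      return E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/false);
    }
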
@@ -2933,25 +2985,29 @@ bool Expr::HasSideEffects(const ASTContext &Ctx) const {
case InitListExprClass:
// FIXME: The children for an InitListExpr doesn't include the array filler.
if (const Expr *E = cast<InitListExpr>(this)->getArrayFiller())
- if (E->HasSideEffects(Ctx))
+ if (E->HasSideEffects(Ctx, IncludePossibleEffects))
return true;
break;
case GenericSelectionExprClass:
return cast<GenericSelectionExpr>(this)->getResultExpr()->
- HasSideEffects(Ctx);
+ HasSideEffects(Ctx, IncludePossibleEffects);
case ChooseExprClass:
- return cast<ChooseExpr>(this)->getChosenSubExpr()->HasSideEffects(Ctx);
+ return cast<ChooseExpr>(this)->getChosenSubExpr()->HasSideEffects(
+ Ctx, IncludePossibleEffects);
case CXXDefaultArgExprClass:
- return cast<CXXDefaultArgExpr>(this)->getExpr()->HasSideEffects(Ctx);
+ return cast<CXXDefaultArgExpr>(this)->getExpr()->HasSideEffects(
+ Ctx, IncludePossibleEffects);
- case CXXDefaultInitExprClass:
- if (const Expr *E = cast<CXXDefaultInitExpr>(this)->getExpr())
- return E->HasSideEffects(Ctx);
+ case CXXDefaultInitExprClass: {
+ const FieldDecl *FD = cast<CXXDefaultInitExpr>(this)->getField();
+ if (const Expr *E = FD->getInClassInitializer())
+ return E->HasSideEffects(Ctx, IncludePossibleEffects);
// If we've not yet parsed the initializer, assume it has side-effects.
return true;
+ }
case CXXDynamicCastExprClass: {
// A dynamic_cast expression has side-effects if it can throw.
@@ -2966,6 +3022,13 @@ bool Expr::HasSideEffects(const ASTContext &Ctx) const {
case CXXReinterpretCastExprClass:
case CXXConstCastExprClass:
case CXXFunctionalCastExprClass: {
+ // While volatile reads are side-effecting in both C and C++, we treat them
+ // as having possible (not definite) side-effects. This allows idiomatic
+ // code to behave without warning, such as sizeof(*v) for a volatile-
+ // qualified pointer.
+ if (!IncludePossibleEffects)
+ break;
+
const CastExpr *CE = cast<CastExpr>(this);
if (CE->getCastKind() == CK_LValueToRValue &&
CE->getSubExpr()->getType().isVolatileQualified())
@@ -2981,7 +3044,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx) const {
case CXXConstructExprClass:
case CXXTemporaryObjectExprClass: {
const CXXConstructExpr *CE = cast<CXXConstructExpr>(this);
- if (!CE->getConstructor()->isTrivial())
+ if (!CE->getConstructor()->isTrivial() && IncludePossibleEffects)
return true;
// A trivial constructor does not add any side-effects of its own. Just look
// at its arguments.
@@ -3009,7 +3072,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx) const {
const Expr *Subexpr = *I;
if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Subexpr))
Subexpr = OVE->getSourceExpr();
- if (Subexpr->HasSideEffects(Ctx))
+ if (Subexpr->HasSideEffects(Ctx, IncludePossibleEffects))
return true;
}
return false;
@@ -3018,22 +3081,24 @@ bool Expr::HasSideEffects(const ASTContext &Ctx) const {
case ObjCBoxedExprClass:
case ObjCArrayLiteralClass:
case ObjCDictionaryLiteralClass:
- case ObjCMessageExprClass:
case ObjCSelectorExprClass:
case ObjCProtocolExprClass:
- case ObjCPropertyRefExprClass:
case ObjCIsaExprClass:
case ObjCIndirectCopyRestoreExprClass:
case ObjCSubscriptRefExprClass:
case ObjCBridgedCastExprClass:
- // FIXME: Classify these cases better.
- return true;
+ case ObjCMessageExprClass:
+ case ObjCPropertyRefExprClass:
+ // FIXME: Classify these cases better.
+ if (IncludePossibleEffects)
+ return true;
+ break;
}
// Recurse to children.
for (const_child_range SubStmts = children(); SubStmts; ++SubStmts)
if (const Stmt *S = *SubStmts)
- if (cast<Expr>(S)->HasSideEffects(Ctx))
+ if (cast<Expr>(S)->HasSideEffects(Ctx, IncludePossibleEffects))
return true;
return false;
@@ -3279,6 +3344,10 @@ FieldDecl *Expr::getSourceBitField() {
return BinOp->getRHS()->getSourceBitField();
}
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(E))
+ if (UnOp->isPrefix() && UnOp->isIncrementDecrementOp())
+ return UnOp->getSubExpr()->getSourceBitField();
+
return nullptr;
}
@@ -3759,7 +3828,7 @@ DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
// Compute type- and value-dependence.
Expr *Index = IndexExprs[IndexIdx];
if (Index->isTypeDependent() || Index->isValueDependent())
- ExprBits.ValueDependent = true;
+ ExprBits.TypeDependent = ExprBits.ValueDependent = true;
if (Index->isInstantiationDependent())
ExprBits.InstantiationDependent = true;
// Propagate unexpanded parameter packs.
@@ -3774,7 +3843,7 @@ DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
Expr *End = IndexExprs[IndexIdx + 1];
if (Start->isTypeDependent() || Start->isValueDependent() ||
End->isTypeDependent() || End->isValueDependent()) {
- ExprBits.ValueDependent = true;
+ ExprBits.TypeDependent = ExprBits.ValueDependent = true;
ExprBits.InstantiationDependent = true;
} else if (Start->isInstantiationDependent() ||
End->isInstantiationDependent()) {
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index 64c21dd5c4fd..93361666183b 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -909,16 +909,21 @@ LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit,
case LCK_ByRef:
assert(Var && "capture must have a variable!");
break;
+ case LCK_VLAType:
+ assert(!Var && "VLA type capture cannot have a variable!");
+ Bits |= Capture_ByCopy;
+ break;
}
DeclAndBits.setInt(Bits);
}
LambdaCaptureKind LambdaCapture::getCaptureKind() const {
Decl *D = DeclAndBits.getPointer();
+ bool CapByCopy = DeclAndBits.getInt() & Capture_ByCopy;
if (!D)
- return LCK_This;
+ return CapByCopy ? LCK_VLAType : LCK_This;
- return (DeclAndBits.getInt() & Capture_ByCopy) ? LCK_ByCopy : LCK_ByRef;
+ return CapByCopy ? LCK_ByCopy : LCK_ByRef;
}
LambdaExpr::LambdaExpr(QualType T,
@@ -1073,8 +1078,8 @@ LambdaExpr::getCaptureInitIndexVars(capture_init_iterator Iter) const {
"Capture index out-of-range");
VarDecl **IndexVars = getArrayIndexVars();
unsigned *IndexStarts = getArrayIndexStarts();
- return ArrayRef<VarDecl *>(IndexVars + IndexStarts[Index],
- IndexVars + IndexStarts[Index + 1]);
+ return llvm::makeArrayRef(IndexVars + IndexStarts[Index],
+ IndexVars + IndexStarts[Index + 1]);
}
CXXRecordDecl *LambdaExpr::getLambdaClass() const {
@@ -1399,7 +1404,8 @@ CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
// It can't be dependent: after all, we were actually able to do the
// lookup.
CXXRecordDecl *Record = nullptr;
- if (getQualifier()) {
+ auto *NNS = getQualifier();
+ if (NNS && NNS->getKind() != NestedNameSpecifier::Super) {
const Type *T = getQualifier()->getAsType();
assert(T && "qualifier in member expression does not name type");
Record = T->getAsCXXRecordDecl();
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index d3d25308a386..933ea97fa2ba 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -124,10 +124,11 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::ObjCPropertyRefExprClass:
// C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of...
case Expr::CXXTypeidExprClass:
- // Unresolved lookups get classified as lvalues.
+ // Unresolved lookups and uncorrected typos get classified as lvalues.
// FIXME: Is this wise? Should they get their own kind?
case Expr::UnresolvedLookupExprClass:
case Expr::UnresolvedMemberExprClass:
+ case Expr::TypoExprClass:
case Expr::CXXDependentScopeMemberExprClass:
case Expr::DependentScopeDeclRefExprClass:
// ObjC instance variables are lvalues
@@ -181,6 +182,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::AsTypeExprClass:
case Expr::ObjCIndirectCopyRestoreExprClass:
case Expr::AtomicExprClass:
+ case Expr::CXXFoldExprClass:
return Cl::CL_PRValue;
// Next come the complicated cases.
@@ -613,14 +615,9 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
return Cl::CM_IncompleteType;
// Records with any const fields (recursively) are not modifiable.
- if (const RecordType *R = CT->getAs<RecordType>()) {
- assert((E->getObjectKind() == OK_ObjCProperty ||
- !Ctx.getLangOpts().CPlusPlus) &&
- "C++ struct assignment should be resolved by the "
- "copy assignment operator.");
+ if (const RecordType *R = CT->getAs<RecordType>())
if (R->hasConstFields())
return Cl::CM_ConstQualified;
- }
return Cl::CM_Modifiable;
}
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index 7d7ca9924c85..3d7f2dca7a2f 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -201,6 +201,7 @@ namespace {
/// Determine whether this is a one-past-the-end pointer.
bool isOnePastTheEnd() const {
+ assert(!Invalid);
if (IsOnePastTheEnd)
return true;
if (MostDerivedArraySize &&
@@ -1308,7 +1309,7 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
}
// Does this refer one past the end of some object?
- if (Designator.isOnePastTheEnd()) {
+ if (!Designator.Invalid && Designator.isOnePastTheEnd()) {
const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
Info.Diag(Loc, diag::note_constexpr_past_end, 1)
<< !Designator.Entries.empty() << !!VD << VD;
@@ -1328,7 +1329,7 @@ static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
// C++1y: A constant initializer for an object o [...] may also invoke
// constexpr constructors for o and its subobjects even if those objects
// are of non-literal class types.
- if (Info.getLangOpts().CPlusPlus1y && This &&
+ if (Info.getLangOpts().CPlusPlus14 && This &&
Info.EvaluatingDecl == This->getLValueBase())
return true;
@@ -1421,6 +1422,17 @@ static bool IsWeakLValue(const LValue &Value) {
return Decl && Decl->isWeak();
}
+static bool isZeroSized(const LValue &Value) {
+ const ValueDecl *Decl = GetLValueBaseDecl(Value);
+ if (Decl && isa<VarDecl>(Decl)) {
+ QualType Ty = Decl->getType();
+ if (Ty->isArrayType())
+ return Ty->isIncompleteType() ||
+ Decl->getASTContext().getTypeSize(Ty) == 0;
+ }
+ return false;
+}
+
static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
// A null base expression indicates a null pointer. These are always
// evaluatable, and they are false unless the offset is zero.
@@ -2020,7 +2032,9 @@ static unsigned getBaseIndex(const CXXRecordDecl *Derived,
/// Extract the value of a character from a string literal.
static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
uint64_t Index) {
- // FIXME: Support PredefinedExpr, ObjCEncodeExpr, MakeStringConstant
+ // FIXME: Support ObjCEncodeExpr, MakeStringConstant
+ if (auto PE = dyn_cast<PredefinedExpr>(Lit))
+ Lit = PE->getFunctionName();
const StringLiteral *S = cast<StringLiteral>(Lit);
const ConstantArrayType *CAT =
Info.Ctx.getAsConstantArrayType(S->getType());
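
With PredefinedExpr handled like a string literal here (and in the lvalue-to-rvalue path further below), indexing __func__ becomes constant-foldable; a hedged sketch of the kind of code this is expected to enable:

    constexpr char firstChar() { return __func__[0]; }
    static_assert(firstChar() == 'f', "reads the first character of \"firstChar\"");
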
@@ -2079,6 +2093,64 @@ static void expandArray(APValue &Array, unsigned Index) {
Array.swap(NewValue);
}
+/// Determine whether a type would actually be read by an lvalue-to-rvalue
+/// conversion. If it's of class type, we may assume that the copy operation
+/// is trivial. Note that the answer is always 'true' for a union type with
+/// fields (because the copy always "reads" the active member) and for any
+/// non-class type.
+static bool isReadByLvalueToRvalueConversion(QualType T) {
+ CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ if (!RD || (RD->isUnion() && !RD->field_empty()))
+ return true;
+ if (RD->isEmpty())
+ return false;
+
+ for (auto *Field : RD->fields())
+ if (isReadByLvalueToRvalueConversion(Field->getType()))
+ return true;
+
+ for (auto &BaseSpec : RD->bases())
+ if (isReadByLvalueToRvalueConversion(BaseSpec.getType()))
+ return true;
+
+ return false;
+}
+
+/// Diagnose an attempt to read from any unreadable field within the specified
+/// type, which might be a class type.
+static bool diagnoseUnreadableFields(EvalInfo &Info, const Expr *E,
+ QualType T) {
+ CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ if (!RD)
+ return false;
+
+ if (!RD->hasMutableFields())
+ return false;
+
+ for (auto *Field : RD->fields()) {
+ // If we're actually going to read this field in some way, then it can't
+ // be mutable. If we're in a union, then assigning to a mutable field
+ // (even an empty one) can change the active member, so that's not OK.
+ // FIXME: Add core issue number for the union case.
+ if (Field->isMutable() &&
+ (RD->isUnion() || isReadByLvalueToRvalueConversion(Field->getType()))) {
+ Info.Diag(E, diag::note_constexpr_ltor_mutable, 1) << Field;
+ Info.Note(Field->getLocation(), diag::note_declared_at);
+ return true;
+ }
+
+ if (diagnoseUnreadableFields(Info, E, Field->getType()))
+ return true;
+ }
+
+ for (auto &BaseSpec : RD->bases())
+ if (diagnoseUnreadableFields(Info, E, BaseSpec.getType()))
+ return true;
+
+ // All mutable fields were empty, and thus not actually read.
+ return false;
+}
+
/// Kinds of access we can perform on an object, for diagnostics.
enum AccessKinds {
AK_Read,
@@ -2134,6 +2206,14 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
}
if (I == N) {
+ // If we are reading an object of class type, there may still be more
+ // things we need to check: if there are any mutable subobjects, we
+ // cannot perform this read. (This only happens when performing a trivial
+ // copy or assignment.)
+ if (ObjType->isRecordType() && handler.AccessKind == AK_Read &&
+ diagnoseUnreadableFields(Info, E, ObjType))
+ return handler.failed();
+
if (!handler.found(*O, ObjType))
return false;
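
For context, the situation the mutable-field check above guards against (a sketch):

    struct S { mutable int m; int n; };
    constexpr S a = {1, 2};
    constexpr int ok = a.n;   // fine: only the non-mutable member is read
    // constexpr S bad = a;   // rejected: the trivial copy would read the
    //                        // mutable member 'a.m', which is not allowed in
    //                        // a constant expression
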
@@ -2490,7 +2570,7 @@ CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E, AccessKinds AK,
// Unless we're looking at a local variable or argument in a constexpr call,
// the variable we're reading must be const.
if (!Frame) {
- if (Info.getLangOpts().CPlusPlus1y &&
+ if (Info.getLangOpts().CPlusPlus14 &&
VD == Info.EvaluatingDecl.dyn_cast<const ValueDecl *>()) {
// OK, we can read and modify an object if we're in the process of
// evaluating its initializer, because its lifetime began in this
@@ -2606,7 +2686,7 @@ CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E, AccessKinds AK,
//
// FIXME: Not all local state is mutable. Allow local constant subobjects
// to be read here (but take care with 'mutable' fields).
- if (Frame && Info.getLangOpts().CPlusPlus1y &&
+ if (Frame && Info.getLangOpts().CPlusPlus14 &&
(Info.EvalStatus.HasSideEffects || Info.keepEvaluatingAfterFailure()))
return CompleteObject();
@@ -2648,10 +2728,10 @@ static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
return false;
CompleteObject LitObj(&Lit, Base->getType());
return extractSubobject(Info, Conv, LitObj, LVal.Designator, RVal);
- } else if (isa<StringLiteral>(Base)) {
+ } else if (isa<StringLiteral>(Base) || isa<PredefinedExpr>(Base)) {
// We represent a string literal array as an lvalue pointing at the
// corresponding expression, rather than building an array of chars.
- // FIXME: Support PredefinedExpr, ObjCEncodeExpr, MakeStringConstant
+ // FIXME: Support ObjCEncodeExpr, MakeStringConstant
APValue Str(Base, CharUnits::Zero(), APValue::NoLValuePath(), 0);
CompleteObject StrObj(&Str, Base->getType());
return extractSubobject(Info, Conv, StrObj, LVal.Designator, RVal);
@@ -2668,7 +2748,7 @@ static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal,
if (LVal.Designator.Invalid)
return false;
- if (!Info.getLangOpts().CPlusPlus1y) {
+ if (!Info.getLangOpts().CPlusPlus14) {
Info.Diag(E);
return false;
}
@@ -2789,7 +2869,7 @@ static bool handleCompoundAssignment(
if (LVal.Designator.Invalid)
return false;
- if (!Info.getLangOpts().CPlusPlus1y) {
+ if (!Info.getLangOpts().CPlusPlus14) {
Info.Diag(E);
return false;
}
@@ -2938,7 +3018,7 @@ static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal,
if (LVal.Designator.Invalid)
return false;
- if (!Info.getLangOpts().CPlusPlus1y) {
+ if (!Info.getLangOpts().CPlusPlus14) {
Info.Diag(E);
return false;
}
@@ -3588,6 +3668,22 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
return false;
}
+/// Determine if a class has any fields that might need to be copied by a
+/// trivial copy or move operation.
+static bool hasFields(const CXXRecordDecl *RD) {
+ if (!RD || RD->isEmpty())
+ return false;
+ for (auto *FD : RD->fields()) {
+ if (FD->isUnnamedBitfield())
+ continue;
+ return true;
+ }
+ for (auto &Base : RD->bases())
+ if (hasFields(Base.getType()->getAsCXXRecordDecl()))
+ return true;
+ return false;
+}
+
namespace {
typedef SmallVector<APValue, 8> ArgVector;
}
@@ -3626,8 +3722,12 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
// For a trivial copy or move assignment, perform an APValue copy. This is
// essential for unions, where the operations performed by the assignment
// operator cannot be represented as statements.
+ //
+ // Skip this for non-union classes with no fields; in that case, the defaulted
+ // copy/move does not actually read the object.
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee);
- if (MD && MD->isDefaulted() && MD->isTrivial()) {
+ if (MD && MD->isDefaulted() && MD->isTrivial() &&
+ (MD->getParent()->isUnion() || hasFields(MD->getParent()))) {
assert(This &&
(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()));
LValue RHS;
@@ -3684,11 +3784,18 @@ static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
}
// For a trivial copy or move constructor, perform an APValue copy. This is
- // essential for unions, where the operations performed by the constructor
- // cannot be represented by ctor-initializers.
+ // essential for unions (or classes with anonymous union members), where the
+ // operations performed by the constructor cannot be represented by
+ // ctor-initializers.
+ //
+ // Skip this for empty non-union classes; we should not perform an
+ // lvalue-to-rvalue conversion on them because their copy constructor does not
+ // actually read them.
if (Definition->isDefaulted() &&
((Definition->isCopyConstructor() && Definition->isTrivial()) ||
- (Definition->isMoveConstructor() && Definition->isTrivial()))) {
+ (Definition->isMoveConstructor() && Definition->isTrivial())) &&
+ (Definition->getParent()->isUnion() ||
+ hasFields(Definition->getParent()))) {
LValue RHS;
RHS.setFrom(Info.Ctx, ArgValues[0]);
return handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(),
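
Roughly the kind of case the empty-class carve-out allows (a hedged sketch): copying an empty object reads nothing, so the copy can be folded even when the source object's value is unknown at compile time:

    struct Empty {};
    Empty runtime_obj;                     // not constexpr
    constexpr Empty copied = runtime_obj;  // no member is actually read
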
@@ -3985,7 +4092,7 @@ public:
const FunctionDecl *FD = nullptr;
LValue *This = nullptr, ThisVal;
- ArrayRef<const Expr *> Args(E->getArgs(), E->getNumArgs());
+ auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
bool HasQualifier = false;
// Extract function decl and 'this' pointer from the callee.
@@ -4148,7 +4255,7 @@ public:
return VisitUnaryPostIncDec(UO);
}
bool VisitUnaryPostIncDec(const UnaryOperator *UO) {
- if (!Info.getLangOpts().CPlusPlus1y && !Info.keepEvaluatingAfterFailure())
+ if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
return Error(UO);
LValue LVal;
@@ -4573,7 +4680,7 @@ bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
}
bool LValueExprEvaluator::VisitUnaryPreIncDec(const UnaryOperator *UO) {
- if (!Info.getLangOpts().CPlusPlus1y && !Info.keepEvaluatingAfterFailure())
+ if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
return Error(UO);
if (!this->Visit(UO->getSubExpr()))
@@ -4586,7 +4693,7 @@ bool LValueExprEvaluator::VisitUnaryPreIncDec(const UnaryOperator *UO) {
bool LValueExprEvaluator::VisitCompoundAssignOperator(
const CompoundAssignOperator *CAO) {
- if (!Info.getLangOpts().CPlusPlus1y && !Info.keepEvaluatingAfterFailure())
+ if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
return Error(CAO);
APValue RHS;
@@ -4608,7 +4715,7 @@ bool LValueExprEvaluator::VisitCompoundAssignOperator(
}
bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
- if (!Info.getLangOpts().CPlusPlus1y && !Info.keepEvaluatingAfterFailure())
+ if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
return Error(E);
APValue NewVal;
@@ -4733,6 +4840,7 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
+ case CK_AddressSpaceConversion:
if (!Visit(SubExpr))
return false;
// Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
@@ -4818,6 +4926,38 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
return ExprEvaluatorBaseTy::VisitCastExpr(E);
}
+static CharUnits GetAlignOfType(EvalInfo &Info, QualType T) {
+ // C++ [expr.alignof]p3:
+ // When alignof is applied to a reference type, the result is the
+ // alignment of the referenced type.
+ if (const ReferenceType *Ref = T->getAs<ReferenceType>())
+ T = Ref->getPointeeType();
+
+ // __alignof is defined to return the preferred alignment.
+ return Info.Ctx.toCharUnitsFromBits(
+ Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
+}
+
+static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E) {
+ E = E->IgnoreParens();
+
+ // The kinds of expressions that we have special-case logic here for
+ // should be kept up to date with the special checks for those
+ // expressions in Sema.
+
+ // alignof decl is always accepted, even if it doesn't make sense: we default
+ // to 1 in those cases.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return Info.Ctx.getDeclAlign(DRE->getDecl(),
+ /*RefAsPointee*/true);
+
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(E))
+ return Info.Ctx.getDeclAlign(ME->getMemberDecl(),
+ /*RefAsPointee*/true);
+
+ return GetAlignOfType(Info, E->getType());
+}
+
bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
if (IsStringLiteralCall(E))
return Success(E);
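
A one-line reminder of the [expr.alignof]p3 rule restated in the hoisted helpers above (sketch; note that __alignof, handled here, reports the preferred alignment, which can be larger than alignof on some targets):

    static_assert(alignof(double &) == alignof(double),
                  "alignof of a reference yields the referenced type's alignment");
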
@@ -4825,7 +4965,71 @@ bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
switch (E->getBuiltinCallee()) {
case Builtin::BI__builtin_addressof:
return EvaluateLValue(E->getArg(0), Result, Info);
+ case Builtin::BI__builtin_assume_aligned: {
+    // We need to be very careful here: if the pointer does not have the
+ // asserted alignment, then the behavior is undefined, and undefined
+ // behavior is non-constant.
+ if (!EvaluatePointer(E->getArg(0), Result, Info))
+ return false;
+
+ LValue OffsetResult(Result);
+ APSInt Alignment;
+ if (!EvaluateInteger(E->getArg(1), Alignment, Info))
+ return false;
+ CharUnits Align = CharUnits::fromQuantity(getExtValue(Alignment));
+
+ if (E->getNumArgs() > 2) {
+ APSInt Offset;
+ if (!EvaluateInteger(E->getArg(2), Offset, Info))
+ return false;
+
+ int64_t AdditionalOffset = -getExtValue(Offset);
+ OffsetResult.Offset += CharUnits::fromQuantity(AdditionalOffset);
+ }
+
+ // If there is a base object, then it must have the correct alignment.
+ if (OffsetResult.Base) {
+ CharUnits BaseAlignment;
+ if (const ValueDecl *VD =
+ OffsetResult.Base.dyn_cast<const ValueDecl*>()) {
+ BaseAlignment = Info.Ctx.getDeclAlign(VD);
+ } else {
+ BaseAlignment =
+ GetAlignOfExpr(Info, OffsetResult.Base.get<const Expr*>());
+ }
+
+ if (BaseAlignment < Align) {
+ Result.Designator.setInvalid();
+ // FIXME: Quantities here cast to integers because the plural modifier
+ // does not work on APSInts yet.
+ CCEDiag(E->getArg(0),
+ diag::note_constexpr_baa_insufficient_alignment) << 0
+ << (int) BaseAlignment.getQuantity()
+ << (unsigned) getExtValue(Alignment);
+ return false;
+ }
+ }
+
+ // The offset must also have the correct alignment.
+ if (OffsetResult.Offset.RoundUpToAlignment(Align) != OffsetResult.Offset) {
+ Result.Designator.setInvalid();
+ APSInt Offset(64, false);
+ Offset = OffsetResult.Offset.getQuantity();
+ if (OffsetResult.Base)
+ CCEDiag(E->getArg(0),
+ diag::note_constexpr_baa_insufficient_alignment) << 1
+ << (int) getExtValue(Offset) << (unsigned) getExtValue(Alignment);
+ else
+ CCEDiag(E->getArg(0),
+ diag::note_constexpr_baa_value_insufficient_alignment)
+ << Offset << (unsigned) getExtValue(Alignment);
+
+ return false;
+ }
+
+ return true;
+ }
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
}
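
For reference, a typical use of the builtin whose constant evaluation is added above (a sketch; the 64-byte alignment is just an example):

    double *assume_cacheline_aligned(void *p) {
      // Promises the compiler that p is 64-byte aligned; if the promise is
      // false the behavior is undefined, which is why the evaluator checks
      // the base object's and the offset's alignment before folding.
      return static_cast<double *>(__builtin_assume_aligned(p, 64));
    }
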
@@ -5166,7 +5370,7 @@ bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
if (ZeroInit && !ZeroInitialization(E))
return false;
- ArrayRef<const Expr *> Args(E->getArgs(), E->getNumArgs());
+ auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
return HandleConstructorCall(E->getExprLoc(), This, Args,
cast<CXXConstructorDecl>(Definition), Info,
Result);
@@ -5270,6 +5474,9 @@ public:
bool VisitCallExpr(const CallExpr *E) {
return VisitConstructExpr(E);
}
+ bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E) {
+ return VisitConstructExpr(E);
+ }
};
} // end anonymous namespace
@@ -5645,7 +5852,7 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
return false;
}
- ArrayRef<const Expr *> Args(E->getArgs(), E->getNumArgs());
+ auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
return HandleConstructorCall(E->getExprLoc(), Subobject, Args,
cast<CXXConstructorDecl>(Definition),
Info, *Value);
@@ -5786,8 +5993,6 @@ public:
bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
private:
- CharUnits GetAlignOfExpr(const Expr *E);
- CharUnits GetAlignOfType(QualType T);
static QualType GetObjectType(APValue::LValueBase B);
bool TryEvaluateBuiltinObjectSize(const CallExpr *E);
// FIXME: Missing: array subscript of vector, member of vector
@@ -5985,8 +6190,20 @@ bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E) {
return false;
}
- // If we can prove the base is null, lower to zero now.
- if (!Base.getLValueBase()) return Success(0, E);
+ if (!Base.getLValueBase()) {
+    // If it is not possible to determine which object the pointer points to
+    // at compile time, __builtin_object_size should return (size_t) -1 for
+    // type 0 or 1 and (size_t) 0 for type 2 or 3.
+    llvm::APSInt TypeIntValue;
+    const Expr *ExprType = E->getArg(1);
+    if (!ExprType->EvaluateAsInt(TypeIntValue, Info.Ctx))
+      return false;
+    if (TypeIntValue == 0 || TypeIntValue == 1)
+      return Success(-1, E);
+    if (TypeIntValue == 2 || TypeIntValue == 3)
+ return Success(0, E);
+ return Error(E);
+ }
QualType T = GetObjectType(Base.getLValueBase());
if (T.isNull() ||
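
The fallback behavior implemented above, in user-code form (a sketch):

    #include <cstddef>

    size_t probe(char *unknown) {
      size_t max_est = __builtin_object_size(unknown, 0);  // folds to (size_t)-1
      size_t min_est = __builtin_object_size(unknown, 2);  // folds to 0
      return max_est & min_est;
    }
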
@@ -6286,6 +6503,27 @@ static bool HasSameBase(const LValue &A, const LValue &B) {
A.getLValueCallIndex() == B.getLValueCallIndex();
}
+/// \brief Determine whether this is a pointer past the end of the complete
+/// object referred to by the lvalue.
+static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx,
+ const LValue &LV) {
+ // A null pointer can be viewed as being "past the end" but we don't
+ // choose to look at it that way here.
+ if (!LV.getLValueBase())
+ return false;
+
+ // If the designator is valid and refers to a subobject, we're not pointing
+ // past the end.
+ if (!LV.getLValueDesignator().Invalid &&
+ !LV.getLValueDesignator().isOnePastTheEnd())
+ return false;
+
+ // We're a past-the-end pointer if we point to the byte after the object,
+ // no matter what our type or path is.
+ auto Size = Ctx.getTypeSizeInChars(getType(LV.getLValueBase()));
+ return LV.getLValueOffset() == Size;
+}
+
namespace {
/// \brief Data recursive integer evaluator of certain binary operators.
@@ -6605,15 +6843,27 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
QualType LHSTy = E->getLHS()->getType();
QualType RHSTy = E->getRHS()->getType();
- if (LHSTy->isAnyComplexType()) {
- assert(RHSTy->isAnyComplexType() && "Invalid comparison");
+ if (LHSTy->isAnyComplexType() || RHSTy->isAnyComplexType()) {
ComplexValue LHS, RHS;
-
- bool LHSOK = EvaluateComplex(E->getLHS(), LHS, Info);
+ bool LHSOK;
+ if (E->getLHS()->getType()->isRealFloatingType()) {
+ LHSOK = EvaluateFloat(E->getLHS(), LHS.FloatReal, Info);
+ if (LHSOK) {
+ LHS.makeComplexFloat();
+ LHS.FloatImag = APFloat(LHS.FloatReal.getSemantics());
+ }
+ } else {
+ LHSOK = EvaluateComplex(E->getLHS(), LHS, Info);
+ }
if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
- if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
+ if (E->getRHS()->getType()->isRealFloatingType()) {
+ if (!EvaluateFloat(E->getRHS(), RHS.FloatReal, Info) || !LHSOK)
+ return false;
+ RHS.makeComplexFloat();
+ RHS.FloatImag = APFloat(RHS.FloatReal.getSemantics());
+ } else if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
return false;
if (LHS.isComplexFloat()) {
@@ -6736,6 +6986,18 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
// object.
if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue))
return Error(E);
+ // We can't compare the address of the start of one object with the
+ // past-the-end address of another object, per C++ DR1652.
+ if ((LHSValue.Base && LHSValue.Offset.isZero() &&
+ isOnePastTheEndOfCompleteObject(Info.Ctx, RHSValue)) ||
+ (RHSValue.Base && RHSValue.Offset.isZero() &&
+ isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue)))
+ return Error(E);
+ // We can't tell whether an object is at the same address as another
+ // zero sized object.
+ if ((RHSValue.Base && isZeroSized(LHSValue)) ||
+ (LHSValue.Base && isZeroSized(RHSValue)))
+ return Error(E);
// Pointers with different bases cannot represent the same object.
// (Note that clang defaults to -fmerge-all-constants, which can
// lead to inconsistent results for comparisons involving the address
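
For context, the comparisons the checks just added above now refuse to fold (a sketch):

    constexpr int a[2] = {0, 0};
    constexpr int b[2] = {0, 0};
    // constexpr bool same = (a + 2 == b);  // rejected: comparing a past-the-end
    //                                      // pointer of 'a' with the start of
    //                                      // 'b' is unspecified (CWG DR 1652)
    constexpr bool ok = (a + 1 != a + 2);   // still folds: both point into 'a'
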
@@ -6940,39 +7202,6 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
}
-CharUnits IntExprEvaluator::GetAlignOfType(QualType T) {
- // C++ [expr.alignof]p3:
- // When alignof is applied to a reference type, the result is the
- // alignment of the referenced type.
- if (const ReferenceType *Ref = T->getAs<ReferenceType>())
- T = Ref->getPointeeType();
-
- // __alignof is defined to return the preferred alignment.
- return Info.Ctx.toCharUnitsFromBits(
- Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
-}
-
-CharUnits IntExprEvaluator::GetAlignOfExpr(const Expr *E) {
- E = E->IgnoreParens();
-
- // The kinds of expressions that we have special-case logic here for
- // should be kept up to date with the special checks for those
- // expressions in Sema.
-
- // alignof decl is always accepted, even if it doesn't make sense: we default
- // to 1 in those cases.
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
- return Info.Ctx.getDeclAlign(DRE->getDecl(),
- /*RefAsPointee*/true);
-
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(E))
- return Info.Ctx.getDeclAlign(ME->getMemberDecl(),
- /*RefAsPointee*/true);
-
- return GetAlignOfType(E->getType());
-}
-
-
/// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step with
/// a result as the expression's type.
bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
@@ -6980,9 +7209,9 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
switch(E->getKind()) {
case UETT_AlignOf: {
if (E->isArgumentType())
- return Success(GetAlignOfType(E->getArgumentType()), E);
+ return Success(GetAlignOfType(Info, E->getArgumentType()), E);
else
- return Success(GetAlignOfExpr(E->getArgumentExpr()), E);
+ return Success(GetAlignOfExpr(Info, E->getArgumentExpr()), E);
}
case UETT_VecStep: {
@@ -7732,24 +7961,49 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
- bool LHSOK = Visit(E->getLHS());
+ // Track whether the LHS or RHS is real at the type system level. When this is
+  // the case, we can simplify our evaluation strategy.
+ bool LHSReal = false, RHSReal = false;
+
+ bool LHSOK;
+ if (E->getLHS()->getType()->isRealFloatingType()) {
+ LHSReal = true;
+ APFloat &Real = Result.FloatReal;
+ LHSOK = EvaluateFloat(E->getLHS(), Real, Info);
+ if (LHSOK) {
+ Result.makeComplexFloat();
+ Result.FloatImag = APFloat(Real.getSemantics());
+ }
+ } else {
+ LHSOK = Visit(E->getLHS());
+ }
if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
ComplexValue RHS;
- if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
+ if (E->getRHS()->getType()->isRealFloatingType()) {
+ RHSReal = true;
+ APFloat &Real = RHS.FloatReal;
+ if (!EvaluateFloat(E->getRHS(), Real, Info) || !LHSOK)
+ return false;
+ RHS.makeComplexFloat();
+ RHS.FloatImag = APFloat(Real.getSemantics());
+ } else if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
return false;
- assert(Result.isComplexFloat() == RHS.isComplexFloat() &&
- "Invalid operands to binary operator.");
+ assert(!(LHSReal && RHSReal) &&
+ "Cannot have both operands of a complex operation be real.");
switch (E->getOpcode()) {
default: return Error(E);
case BO_Add:
if (Result.isComplexFloat()) {
Result.getComplexFloatReal().add(RHS.getComplexFloatReal(),
APFloat::rmNearestTiesToEven);
- Result.getComplexFloatImag().add(RHS.getComplexFloatImag(),
- APFloat::rmNearestTiesToEven);
+ if (LHSReal)
+ Result.getComplexFloatImag() = RHS.getComplexFloatImag();
+ else if (!RHSReal)
+ Result.getComplexFloatImag().add(RHS.getComplexFloatImag(),
+ APFloat::rmNearestTiesToEven);
} else {
Result.getComplexIntReal() += RHS.getComplexIntReal();
Result.getComplexIntImag() += RHS.getComplexIntImag();
@@ -7759,8 +8013,13 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (Result.isComplexFloat()) {
Result.getComplexFloatReal().subtract(RHS.getComplexFloatReal(),
APFloat::rmNearestTiesToEven);
- Result.getComplexFloatImag().subtract(RHS.getComplexFloatImag(),
- APFloat::rmNearestTiesToEven);
+ if (LHSReal) {
+ Result.getComplexFloatImag() = RHS.getComplexFloatImag();
+ Result.getComplexFloatImag().changeSign();
+ } else if (!RHSReal) {
+ Result.getComplexFloatImag().subtract(RHS.getComplexFloatImag(),
+ APFloat::rmNearestTiesToEven);
+ }
} else {
Result.getComplexIntReal() -= RHS.getComplexIntReal();
Result.getComplexIntImag() -= RHS.getComplexIntImag();
@@ -7768,25 +8027,75 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
break;
case BO_Mul:
if (Result.isComplexFloat()) {
+ // This is an implementation of complex multiplication according to the
+      // constraints laid out in C11 Annex G. The implementation uses the
+ // following naming scheme:
+ // (a + ib) * (c + id)
ComplexValue LHS = Result;
- APFloat &LHS_r = LHS.getComplexFloatReal();
- APFloat &LHS_i = LHS.getComplexFloatImag();
- APFloat &RHS_r = RHS.getComplexFloatReal();
- APFloat &RHS_i = RHS.getComplexFloatImag();
-
- APFloat Tmp = LHS_r;
- Tmp.multiply(RHS_r, APFloat::rmNearestTiesToEven);
- Result.getComplexFloatReal() = Tmp;
- Tmp = LHS_i;
- Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
- Result.getComplexFloatReal().subtract(Tmp, APFloat::rmNearestTiesToEven);
-
- Tmp = LHS_r;
- Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
- Result.getComplexFloatImag() = Tmp;
- Tmp = LHS_i;
- Tmp.multiply(RHS_r, APFloat::rmNearestTiesToEven);
- Result.getComplexFloatImag().add(Tmp, APFloat::rmNearestTiesToEven);
+ APFloat &A = LHS.getComplexFloatReal();
+ APFloat &B = LHS.getComplexFloatImag();
+ APFloat &C = RHS.getComplexFloatReal();
+ APFloat &D = RHS.getComplexFloatImag();
+ APFloat &ResR = Result.getComplexFloatReal();
+ APFloat &ResI = Result.getComplexFloatImag();
+ if (LHSReal) {
+ assert(!RHSReal && "Cannot have two real operands for a complex op!");
+ ResR = A * C;
+ ResI = A * D;
+ } else if (RHSReal) {
+ ResR = C * A;
+ ResI = C * B;
+ } else {
+ // In the fully general case, we need to handle NaNs and infinities
+ // robustly.
+ APFloat AC = A * C;
+ APFloat BD = B * D;
+ APFloat AD = A * D;
+ APFloat BC = B * C;
+ ResR = AC - BD;
+ ResI = AD + BC;
+ if (ResR.isNaN() && ResI.isNaN()) {
+ bool Recalc = false;
+ if (A.isInfinity() || B.isInfinity()) {
+ A = APFloat::copySign(
+ APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A);
+ B = APFloat::copySign(
+ APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B);
+ if (C.isNaN())
+ C = APFloat::copySign(APFloat(C.getSemantics()), C);
+ if (D.isNaN())
+ D = APFloat::copySign(APFloat(D.getSemantics()), D);
+ Recalc = true;
+ }
+ if (C.isInfinity() || D.isInfinity()) {
+ C = APFloat::copySign(
+ APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C);
+ D = APFloat::copySign(
+ APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D);
+ if (A.isNaN())
+ A = APFloat::copySign(APFloat(A.getSemantics()), A);
+ if (B.isNaN())
+ B = APFloat::copySign(APFloat(B.getSemantics()), B);
+ Recalc = true;
+ }
+ if (!Recalc && (AC.isInfinity() || BD.isInfinity() ||
+ AD.isInfinity() || BC.isInfinity())) {
+ if (A.isNaN())
+ A = APFloat::copySign(APFloat(A.getSemantics()), A);
+ if (B.isNaN())
+ B = APFloat::copySign(APFloat(B.getSemantics()), B);
+ if (C.isNaN())
+ C = APFloat::copySign(APFloat(C.getSemantics()), C);
+ if (D.isNaN())
+ D = APFloat::copySign(APFloat(D.getSemantics()), D);
+ Recalc = true;
+ }
+ if (Recalc) {
+ ResR = APFloat::getInf(A.getSemantics()) * (A * C - B * D);
+ ResI = APFloat::getInf(A.getSemantics()) * (A * D + B * C);
+ }
+ }
+ }
} else {
ComplexValue LHS = Result;
Result.getComplexIntReal() =
@@ -7799,33 +8108,57 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
break;
case BO_Div:
if (Result.isComplexFloat()) {
+ // This is an implementation of complex division according to the
+      // constraints laid out in C11 Annex G. The implementation uses the
+ // following naming scheme:
+ // (a + ib) / (c + id)
ComplexValue LHS = Result;
- APFloat &LHS_r = LHS.getComplexFloatReal();
- APFloat &LHS_i = LHS.getComplexFloatImag();
- APFloat &RHS_r = RHS.getComplexFloatReal();
- APFloat &RHS_i = RHS.getComplexFloatImag();
- APFloat &Res_r = Result.getComplexFloatReal();
- APFloat &Res_i = Result.getComplexFloatImag();
-
- APFloat Den = RHS_r;
- Den.multiply(RHS_r, APFloat::rmNearestTiesToEven);
- APFloat Tmp = RHS_i;
- Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
- Den.add(Tmp, APFloat::rmNearestTiesToEven);
-
- Res_r = LHS_r;
- Res_r.multiply(RHS_r, APFloat::rmNearestTiesToEven);
- Tmp = LHS_i;
- Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
- Res_r.add(Tmp, APFloat::rmNearestTiesToEven);
- Res_r.divide(Den, APFloat::rmNearestTiesToEven);
-
- Res_i = LHS_i;
- Res_i.multiply(RHS_r, APFloat::rmNearestTiesToEven);
- Tmp = LHS_r;
- Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
- Res_i.subtract(Tmp, APFloat::rmNearestTiesToEven);
- Res_i.divide(Den, APFloat::rmNearestTiesToEven);
+ APFloat &A = LHS.getComplexFloatReal();
+ APFloat &B = LHS.getComplexFloatImag();
+ APFloat &C = RHS.getComplexFloatReal();
+ APFloat &D = RHS.getComplexFloatImag();
+ APFloat &ResR = Result.getComplexFloatReal();
+ APFloat &ResI = Result.getComplexFloatImag();
+ if (RHSReal) {
+ ResR = A / C;
+ ResI = B / C;
+ } else {
+ if (LHSReal) {
+        // There are no real optimizations we can do here; stub out with zero.
+ B = APFloat::getZero(A.getSemantics());
+ }
+ int DenomLogB = 0;
+ APFloat MaxCD = maxnum(abs(C), abs(D));
+ if (MaxCD.isFinite()) {
+ DenomLogB = ilogb(MaxCD);
+ C = scalbn(C, -DenomLogB);
+ D = scalbn(D, -DenomLogB);
+ }
+ APFloat Denom = C * C + D * D;
+ ResR = scalbn((A * C + B * D) / Denom, -DenomLogB);
+ ResI = scalbn((B * C - A * D) / Denom, -DenomLogB);
+ if (ResR.isNaN() && ResI.isNaN()) {
+ if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) {
+ ResR = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * A;
+ ResI = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * B;
+ } else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() &&
+ D.isFinite()) {
+ A = APFloat::copySign(
+ APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A);
+ B = APFloat::copySign(
+ APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B);
+ ResR = APFloat::getInf(ResR.getSemantics()) * (A * C + B * D);
+ ResI = APFloat::getInf(ResI.getSemantics()) * (B * C - A * D);
+ } else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) {
+ C = APFloat::copySign(
+ APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C);
+ D = APFloat::copySign(
+ APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D);
+ ResR = APFloat::getZero(ResR.getSemantics()) * (A * C + B * D);
+ ResI = APFloat::getZero(ResI.getSemantics()) * (B * C - A * D);
+ }
+ }
+ }
} else {
if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0)
return Error(E, diag::note_expr_divide_by_zero);
@@ -7966,6 +8299,7 @@ public:
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
case Builtin::BI__assume:
+ case Builtin::BI__builtin_assume:
// The argument is not evaluated!
return true;
}
@@ -8338,6 +8672,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::CXXDeleteExprClass:
case Expr::CXXPseudoDestructorExprClass:
case Expr::UnresolvedLookupExprClass:
+ case Expr::TypoExprClass:
case Expr::DependentScopeDeclRefExprClass:
case Expr::CXXConstructExprClass:
case Expr::CXXStdInitializerListExprClass:
@@ -8373,6 +8708,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::PseudoObjectExprClass:
case Expr::AtomicExprClass:
case Expr::LambdaExprClass:
+ case Expr::CXXFoldExprClass:
return ICEDiag(IK_NotICE, E->getLocStart());
case Expr::InitListExprClass: {
@@ -8682,7 +9018,11 @@ static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx,
if (!E->isCXX11ConstantExpr(Ctx, &Result, Loc))
return false;
- assert(Result.isInt() && "pointer cast to int is not an ICE");
+ if (!Result.isInt()) {
+ if (Loc) *Loc = E->getExprLoc();
+ return false;
+ }
+
if (Value) *Value = Result.getInt();
return true;
}
@@ -8751,7 +9091,8 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
ArgVector ArgValues(Args.size());
for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
I != E; ++I) {
- if (!Evaluate(ArgValues[I - Args.begin()], Info, *I))
+ if ((*I)->isValueDependent() ||
+ !Evaluate(ArgValues[I - Args.begin()], Info, *I))
// If evaluation fails, throw away the argument entirely.
ArgValues[I - Args.begin()] = APValue();
if (Info.EvalStatus.HasSideEffects)
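
The BO_Mul and BO_Div hunks above implement the C11 Annex G recovery rules
during constant evaluation: when the naive complex product (or quotient) comes
out as NaN in both parts even though an operand was infinite, the operands are
re-boxed to signed ones/zeros and the result is rebuilt as a properly signed
infinity. A minimal standalone sketch of the multiplication recovery step,
using plain doubles instead of APFloat (the helper name mulAnnexG and the
driver in main are illustrative, not part of the patch):

    #include <cmath>
    #include <cstdio>

    // Mirrors the (a + ib) * (c + id) handling above, with doubles standing in
    // for APFloat.
    static void mulAnnexG(double a, double b, double c, double d,
                          double &resR, double &resI) {
      double ac = a * c, bd = b * d, ad = a * d, bc = b * c;
      resR = ac - bd;
      resI = ad + bc;
      if (std::isnan(resR) && std::isnan(resI)) {
        bool recalc = false;
        if (std::isinf(a) || std::isinf(b)) {
          // Box infinities to +/-1; quiet any NaN in the other operand to +/-0.
          a = std::copysign(std::isinf(a) ? 1.0 : 0.0, a);
          b = std::copysign(std::isinf(b) ? 1.0 : 0.0, b);
          if (std::isnan(c)) c = std::copysign(0.0, c);
          if (std::isnan(d)) d = std::copysign(0.0, d);
          recalc = true;
        }
        if (std::isinf(c) || std::isinf(d)) {
          c = std::copysign(std::isinf(c) ? 1.0 : 0.0, c);
          d = std::copysign(std::isinf(d) ? 1.0 : 0.0, d);
          if (std::isnan(a)) a = std::copysign(0.0, a);
          if (std::isnan(b)) b = std::copysign(0.0, b);
          recalc = true;
        }
        if (!recalc && (std::isinf(ac) || std::isinf(bd) ||
                        std::isinf(ad) || std::isinf(bc))) {
          if (std::isnan(a)) a = std::copysign(0.0, a);
          if (std::isnan(b)) b = std::copysign(0.0, b);
          if (std::isnan(c)) c = std::copysign(0.0, c);
          if (std::isnan(d)) d = std::copysign(0.0, d);
          recalc = true;
        }
        if (recalc) {
          resR = INFINITY * (a * c - b * d);
          resI = INFINITY * (a * d + b * c);
        }
      }
    }

    int main() {
      double r, i;
      // (inf + NaN*i) * (2 + 0*i): the textbook formula gives NaN in both
      // parts, but Annex G requires an infinite result (at least one part
      // infinite) because the left operand is an infinity.
      mulAnnexG(INFINITY, NAN, 2.0, 0.0, r, i);
      std::printf("%g %g\n", r, i);   // prints something like "inf nan"
    }
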
diff --git a/lib/AST/ItaniumCXXABI.cpp b/lib/AST/ItaniumCXXABI.cpp
index b5f8c0f4bc87..378121c8e5b9 100644
--- a/lib/AST/ItaniumCXXABI.cpp
+++ b/lib/AST/ItaniumCXXABI.cpp
@@ -29,16 +29,64 @@ using namespace clang;
namespace {
+/// According to Itanium C++ ABI 5.1.2:
+/// the name of an anonymous union is considered to be
+/// the name of the first named data member found by a pre-order,
+/// depth-first, declaration-order walk of the data members of
+/// the anonymous union.
+/// If there is no such data member (i.e., if all of the data members
+/// in the union are unnamed), then there is no way for a program to
+/// refer to the anonymous union, and there is therefore no need to mangle its name.
+///
+/// Returns the name of the anonymous union VarDecl, or nullptr if it is not found.
+static const IdentifierInfo *findAnonymousUnionVarDeclName(const VarDecl& VD) {
+ const RecordType *RT = VD.getType()->getAs<RecordType>();
+ assert(RT && "type of VarDecl is expected to be RecordType.");
+ assert(RT->getDecl()->isUnion() && "RecordType is expected to be a union.");
+ if (const FieldDecl *FD = RT->getDecl()->findFirstNamedDataMember()) {
+ return FD->getIdentifier();
+ }
+
+ return nullptr;
+}
+
/// \brief Keeps track of the mangled names of lambda expressions and block
/// literals within a particular context.
class ItaniumNumberingContext : public MangleNumberingContext {
- llvm::DenseMap<IdentifierInfo*, unsigned> VarManglingNumbers;
- llvm::DenseMap<IdentifierInfo*, unsigned> TagManglingNumbers;
+ llvm::DenseMap<const Type *, unsigned> ManglingNumbers;
+ llvm::DenseMap<const IdentifierInfo *, unsigned> VarManglingNumbers;
+ llvm::DenseMap<const IdentifierInfo *, unsigned> TagManglingNumbers;
public:
+ unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override {
+ const FunctionProtoType *Proto =
+ CallOperator->getType()->getAs<FunctionProtoType>();
+ ASTContext &Context = CallOperator->getASTContext();
+
+ QualType Key =
+ Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(),
+ FunctionProtoType::ExtProtoInfo());
+ Key = Context.getCanonicalType(Key);
+ return ++ManglingNumbers[Key->castAs<FunctionProtoType>()];
+ }
+
+ unsigned getManglingNumber(const BlockDecl *BD) override {
+ const Type *Ty = nullptr;
+ return ++ManglingNumbers[Ty];
+ }
+
+ unsigned getStaticLocalNumber(const VarDecl *VD) override {
+ return 0;
+ }
+
/// Variable decls are numbered by identifier.
unsigned getManglingNumber(const VarDecl *VD, unsigned) override {
- return ++VarManglingNumbers[VD->getIdentifier()];
+ const IdentifierInfo *Identifier = VD->getIdentifier();
+ if (!Identifier) {
+      // A VarDecl without an identifier represents an anonymous union declaration.
+ Identifier = findAnonymousUnionVarDeclName(*VD);
+ }
+ return ++VarManglingNumbers[Identifier];
}
unsigned getManglingNumber(const TagDecl *TD, unsigned) override {
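
A short source-level illustration of what findAnonymousUnionVarDeclName has to
handle: an anonymous union declared as a (static) variable produces a VarDecl
with no identifier, and per the ABI rule quoted above its mangled name is
derived from the first named data member. The identifiers below are
placeholders and the exact mangled strings are deliberately not spelled out.

    inline int bump() {
      // The backing VarDecl of this anonymous union has no name of its own,
      // so its mangling (and mangling number) is keyed off the first named
      // data member, "counter".
      static union {
        int counter;
        float as_float;
      };
      return ++counter;   // members are injected into the enclosing scope
    }
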
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index 977d6fca2c40..156ad646fa6d 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -117,7 +117,7 @@ class ItaniumMangleContextImpl : public ItaniumMangleContext {
typedef std::pair<const DeclContext*, IdentifierInfo*> DiscriminatorKeyTy;
llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
-
+
public:
explicit ItaniumMangleContextImpl(ASTContext &Context,
DiagnosticsEngine &Diags)
@@ -150,6 +150,8 @@ public:
void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
raw_ostream &) override;
+ void mangleCXXCtorComdat(const CXXConstructorDecl *D, raw_ostream &) override;
+ void mangleCXXDtorComdat(const CXXDestructorDecl *D, raw_ostream &) override;
void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &) override;
void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
void mangleDynamicAtExitDestructor(const VarDecl *D,
@@ -373,6 +375,7 @@ private:
NamedDecl *firstQualifierLookup,
DeclarationName name,
unsigned knownArity);
+ void mangleCastExpression(const Expr *E, StringRef CastEncoding);
void mangleExpression(const Expr *E, unsigned Arity = UnknownArity);
void mangleCXXCtorType(CXXCtorType T);
void mangleCXXDtorType(CXXDtorType T);
@@ -634,13 +637,11 @@ void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
return;
// <template-template-param> ::= <template-param>
- if (const TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(ND))
mangleTemplateParameter(TTP->getIndex());
- return;
- }
+ else
+ mangleUnscopedName(ND->getTemplatedDecl());
- mangleUnscopedName(ND->getTemplatedDecl());
addSubstitution(ND);
}
@@ -811,6 +812,9 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
// We never want an 'E' here.
return;
+ case NestedNameSpecifier::Super:
+ llvm_unreachable("Can't mangle __super specifier");
+
case NestedNameSpecifier::Namespace:
if (qualifier->getPrefix())
mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
@@ -1046,24 +1050,6 @@ void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *qualifier,
mangleUnqualifiedName(nullptr, name, knownArity);
}
-static const FieldDecl *FindFirstNamedDataMember(const RecordDecl *RD) {
- assert(RD->isAnonymousStructOrUnion() &&
- "Expected anonymous struct or union!");
-
- for (const auto *I : RD->fields()) {
- if (I->getIdentifier())
- return I;
-
- if (const RecordType *RT = I->getType()->getAs<RecordType>())
- if (const FieldDecl *NamedDataMember =
- FindFirstNamedDataMember(RT->getDecl()))
- return NamedDataMember;
- }
-
- // We didn't find a named data member.
- return nullptr;
-}
-
void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
DeclarationName Name,
unsigned KnownArity) {
@@ -1100,9 +1086,9 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
// We must have an anonymous union or struct declaration.
- const RecordDecl *RD =
+ const RecordDecl *RD =
cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl());
-
+
// Itanium C++ ABI 5.1.2:
//
// For the purposes of mangling, the name of an anonymous union is
@@ -1112,14 +1098,16 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// the data members in the union are unnamed), then there is no way for
// a program to refer to the anonymous union, and there is therefore no
// need to mangle its name.
- const FieldDecl *FD = FindFirstNamedDataMember(RD);
+ assert(RD->isAnonymousStructOrUnion()
+ && "Expected anonymous struct or union!");
+ const FieldDecl *FD = RD->findFirstNamedDataMember();
// It's actually possible for various reasons for us to get here
// with an empty anonymous struct / union. Fortunately, it
// doesn't really matter what name we generate.
if (!FD) break;
assert(FD->getIdentifier() && "Data member name isn't an identifier!");
-
+
mangleSourceName(FD->getIdentifier());
break;
}
@@ -1409,8 +1397,8 @@ void CXXNameMangler::mangleUnqualifiedBlock(const BlockDecl *Block) {
if (!Number)
Number = Context.getBlockId(Block, false);
Out << "Ub";
- if (Number > 1)
- Out << Number - 2;
+ if (Number > 0)
+ Out << Number - 1;
Out << '_';
}
@@ -1459,6 +1447,9 @@ void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) {
// nothing
return;
+ case NestedNameSpecifier::Super:
+ llvm_unreachable("Can't mangle __super specifier");
+
case NestedNameSpecifier::Namespace:
mangleName(qualifier->getAsNamespace());
return;
@@ -1554,14 +1545,13 @@ void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND,
return;
// <template-template-param> ::= <template-param>
- if (const TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(ND)) {
mangleTemplateParameter(TTP->getIndex());
- return;
+ } else {
+ manglePrefix(getEffectiveDeclContext(ND), NoFunction);
+ mangleUnqualifiedName(ND->getTemplatedDecl());
}
- manglePrefix(getEffectiveDeclContext(ND), NoFunction);
- mangleUnqualifiedName(ND->getTemplatedDecl());
addSubstitution(ND);
}
@@ -1834,6 +1824,19 @@ void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
Context.mangleObjCMethodName(MD, Out);
}
+static bool isTypeSubstitutable(Qualifiers Quals, const Type *Ty) {
+ if (Quals)
+ return true;
+ if (Ty->isSpecificBuiltinType(BuiltinType::ObjCSel))
+ return true;
+ if (Ty->isOpenCLSpecificType())
+ return true;
+ if (Ty->isBuiltinType())
+ return false;
+
+ return true;
+}
+
void CXXNameMangler::mangleType(QualType T) {
// If our type is instantiation-dependent but not dependent, we mangle
// it as it was written in the source, removing any top-level sugar.
@@ -1875,7 +1878,7 @@ void CXXNameMangler::mangleType(QualType T) {
Qualifiers quals = split.Quals;
const Type *ty = split.Ty;
- bool isSubstitutable = quals || !isa<BuiltinType>(T);
+ bool isSubstitutable = isTypeSubstitutable(quals, ty);
if (isSubstitutable && mangleSubstitution(T))
return;
@@ -2301,9 +2304,7 @@ void CXXNameMangler::mangleType(const VectorType *T) {
llvm::Triple::ArchType Arch =
getASTContext().getTargetInfo().getTriple().getArch();
if ((Arch == llvm::Triple::aarch64 ||
- Arch == llvm::Triple::aarch64_be ||
- Arch == llvm::Triple::arm64_be ||
- Arch == llvm::Triple::arm64) && !Target.isOSDarwin())
+ Arch == llvm::Triple::aarch64_be) && !Target.isOSDarwin())
mangleAArch64NeonVectorType(T);
else
mangleNeonVectorType(T);
@@ -2528,6 +2529,18 @@ void CXXNameMangler::mangleMemberExpr(const Expr *base,
// <expression> ::= dt <expression> <unresolved-name>
// ::= pt <expression> <unresolved-name>
if (base) {
+
+ // Ignore member expressions involving anonymous unions.
+ while (const auto *RT = base->getType()->getAs<RecordType>()) {
+ if (!RT->getDecl()->isAnonymousStructOrUnion())
+ break;
+ const auto *ME = dyn_cast<MemberExpr>(base);
+ if (!ME)
+ break;
+ base = ME->getBase();
+ isArrow = ME->isArrow();
+ }
+
if (base->isImplicitCXXThis()) {
// Note: GCC mangles member expressions to the implicit 'this' as
// *this., whereas we represent them as this->. The Itanium C++ ABI
@@ -2572,12 +2585,23 @@ static bool isParenthesizedADLCallee(const CallExpr *call) {
return true;
}
+void CXXNameMangler::mangleCastExpression(const Expr *E, StringRef CastEncoding) {
+ const ExplicitCastExpr *ECE = cast<ExplicitCastExpr>(E);
+ Out << CastEncoding;
+ mangleType(ECE->getType());
+ mangleExpression(ECE->getSubExpr());
+}
+
void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
// <expression> ::= <unary operator-name> <expression>
// ::= <binary operator-name> <expression> <expression>
// ::= <trinary operator-name> <expression> <expression> <expression>
// ::= cv <type> expression # conversion with one argument
// ::= cv <type> _ <expression>* E # conversion with a different number of arguments
+ // ::= dc <type> <expression> # dynamic_cast<type> (expression)
+ // ::= sc <type> <expression> # static_cast<type> (expression)
+ // ::= cc <type> <expression> # const_cast<type> (expression)
+ // ::= rc <type> <expression> # reinterpret_cast<type> (expression)
// ::= st <type> # sizeof (a type)
// ::= at <type> # alignof (a type)
// ::= <template-param>
@@ -2612,6 +2636,7 @@ recurse:
case Expr::ParenListExprClass:
case Expr::LambdaExprClass:
case Expr::MSPropertyRefExprClass:
+ case Expr::TypoExprClass: // This should no longer exist in the AST by now.
llvm_unreachable("unexpected statement kind");
// FIXME: invent manglings for all these.
@@ -2643,7 +2668,6 @@ recurse:
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
case Expr::VAArgExprClass:
- case Expr::CXXUuidofExprClass:
case Expr::CUDAKernelCallExprClass:
case Expr::AsTypeExprClass:
case Expr::PseudoObjectExprClass:
@@ -2658,6 +2682,20 @@ recurse:
break;
}
+ case Expr::CXXUuidofExprClass: {
+ const CXXUuidofExpr *UE = cast<CXXUuidofExpr>(E);
+ if (UE->isTypeOperand()) {
+ QualType UuidT = UE->getTypeOperand(Context.getASTContext());
+ Out << "u8__uuidoft";
+ mangleType(UuidT);
+ } else {
+ Expr *UuidExp = UE->getExprOperand();
+ Out << "u8__uuidofz";
+ mangleExpression(UuidExp, Arity);
+ }
+ break;
+ }
+
// Even gcc-4.5 doesn't mangle this.
case Expr::BinaryConditionalOperatorClass: {
DiagnosticsEngine &Diags = Context.getDiags();
@@ -2982,17 +3020,22 @@ recurse:
// Fall through to mangle the cast itself.
case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ mangleCastExpression(E, "cv");
+ break;
+
case Expr::CXXStaticCastExprClass:
+ mangleCastExpression(E, "sc");
+ break;
case Expr::CXXDynamicCastExprClass:
+ mangleCastExpression(E, "dc");
+ break;
case Expr::CXXReinterpretCastExprClass:
+ mangleCastExpression(E, "rc");
+ break;
case Expr::CXXConstCastExprClass:
- case Expr::CXXFunctionalCastExprClass: {
- const ExplicitCastExpr *ECE = cast<ExplicitCastExpr>(E);
- Out << "cv";
- mangleType(ECE->getType());
- mangleExpression(ECE->getSubExpr());
+ mangleCastExpression(E, "cc");
break;
- }
case Expr::CXXOperatorCallExprClass: {
const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
@@ -3175,12 +3218,33 @@ recurse:
mangleFunctionParam(cast<ParmVarDecl>(Pack));
break;
}
-
+
case Expr::MaterializeTemporaryExprClass: {
mangleExpression(cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr());
break;
}
-
+
+ case Expr::CXXFoldExprClass: {
+ auto *FE = cast<CXXFoldExpr>(E);
+ if (FE->isLeftFold())
+ Out << (FE->getInit() ? "fL" : "fl");
+ else
+ Out << (FE->getInit() ? "fR" : "fr");
+
+ if (FE->getOperator() == BO_PtrMemD)
+ Out << "ds";
+ else
+ mangleOperatorName(
+ BinaryOperator::getOverloadedOperator(FE->getOperator()),
+ /*Arity=*/2);
+
+ if (FE->getLHS())
+ mangleExpression(FE->getLHS());
+ if (FE->getRHS())
+ mangleExpression(FE->getRHS());
+ break;
+ }
+
case Expr::CXXThisExprClass:
Out << "fpT";
break;
@@ -3251,8 +3315,8 @@ void CXXNameMangler::mangleFunctionParam(const ParmVarDecl *parm) {
void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
// <ctor-dtor-name> ::= C1 # complete object constructor
// ::= C2 # base object constructor
- // ::= C3 # complete object allocating constructor
//
+ // In addition, C5 is a comdat name with C1 and C2 in it.
switch (T) {
case Ctor_Complete:
Out << "C1";
@@ -3260,8 +3324,8 @@ void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
case Ctor_Base:
Out << "C2";
break;
- case Ctor_CompleteAllocating:
- Out << "C3";
+ case Ctor_Comdat:
+ Out << "C5";
break;
}
}
@@ -3271,6 +3335,7 @@ void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
// ::= D1 # complete object destructor
// ::= D2 # base object destructor
//
+ // In addition, D5 is a comdat name with D1, D2 and, if virtual, D0 in it.
switch (T) {
case Dtor_Deleting:
Out << "D0";
@@ -3281,6 +3346,9 @@ void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
case Dtor_Base:
Out << "D2";
break;
+ case Dtor_Comdat:
+ Out << "D5";
+ break;
}
}
@@ -3363,7 +3431,7 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A) {
// and pointer-to-function expressions are represented as a declaration not
// an expression. We compensate for it here to produce the correct mangling.
ValueDecl *D = A.getAsDecl();
- bool compensateMangling = !A.isDeclForReferenceParam();
+ bool compensateMangling = !A.getParamTypeForDecl()->isReferenceType();
if (compensateMangling) {
Out << 'X';
mangleOperatorName(OO_Amp, 1);
@@ -3674,7 +3742,7 @@ void ItaniumMangleContextImpl::mangleCXXName(const NamedDecl *D,
"Mangling declaration");
CXXNameMangler Mangler(*this, Out, D);
- return Mangler.mangle(D);
+ Mangler.mangle(D);
}
void ItaniumMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D,
@@ -3691,6 +3759,18 @@ void ItaniumMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D,
Mangler.mangle(D);
}
+void ItaniumMangleContextImpl::mangleCXXCtorComdat(const CXXConstructorDecl *D,
+ raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Ctor_Comdat);
+ Mangler.mangle(D);
+}
+
+void ItaniumMangleContextImpl::mangleCXXDtorComdat(const CXXDestructorDecl *D,
+ raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Dtor_Comdat);
+ Mangler.mangle(D);
+}
+
void ItaniumMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
const ThunkInfo &Thunk,
raw_ostream &Out) {
@@ -3851,3 +3931,4 @@ ItaniumMangleContext *
ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
return new ItaniumMangleContextImpl(Context, Diags);
}
+
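
Two source forms affected by the new expression manglings above, shown only as
illustrations (widen and sum are placeholder names; the exact mangled strings
are not reproduced). Dependent cast expressions in a signature now get the
distinct sc/dc/cc/rc codes instead of the generic "cv", and fold-expressions
use the new fl/fr/fL/fR productions followed by the folded operator.

    // static_cast in a dependent return type: mangled with "sc", not "cv".
    template <typename T>
    auto widen(T t) -> decltype(static_cast<long>(t)) {
      return static_cast<long>(t);
    }

    // Unary right fold over operator+: mangled with "fr" followed by "pl".
    template <typename... Ts>
    auto sum(Ts... ts) -> decltype((ts + ...)) {
      return (ts + ...);
    }
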
diff --git a/lib/AST/Mangle.cpp b/lib/AST/Mangle.cpp
index fdc00e389350..1a061c4f6632 100644
--- a/lib/AST/Mangle.cpp
+++ b/lib/AST/Mangle.cpp
@@ -49,10 +49,11 @@ static void mangleFunctionBlock(MangleContext &Context,
void MangleContext::anchor() { }
-enum StdOrFastCC {
- SOF_OTHER,
- SOF_FAST,
- SOF_STD
+enum CCMangling {
+ CCM_Other,
+ CCM_Fast,
+ CCM_Vector,
+ CCM_Std
};
static bool isExternC(const NamedDecl *ND) {
@@ -61,20 +62,22 @@ static bool isExternC(const NamedDecl *ND) {
return cast<VarDecl>(ND)->isExternC();
}
-static StdOrFastCC getStdOrFastCallMangling(const ASTContext &Context,
- const NamedDecl *ND) {
+static CCMangling getCallingConvMangling(const ASTContext &Context,
+ const NamedDecl *ND) {
const TargetInfo &TI = Context.getTargetInfo();
const llvm::Triple &Triple = TI.getTriple();
- if (!Triple.isOSWindows() || Triple.getArch() != llvm::Triple::x86)
- return SOF_OTHER;
+ if (!Triple.isOSWindows() ||
+ !(Triple.getArch() == llvm::Triple::x86 ||
+ Triple.getArch() == llvm::Triple::x86_64))
+ return CCM_Other;
if (Context.getLangOpts().CPlusPlus && !isExternC(ND) &&
TI.getCXXABI() == TargetCXXABI::Microsoft)
- return SOF_OTHER;
+ return CCM_Other;
const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND);
if (!FD)
- return SOF_OTHER;
+ return CCM_Other;
QualType T = FD->getType();
const FunctionType *FT = T->castAs<FunctionType>();
@@ -82,19 +85,21 @@ static StdOrFastCC getStdOrFastCallMangling(const ASTContext &Context,
CallingConv CC = FT->getCallConv();
switch (CC) {
default:
- return SOF_OTHER;
+ return CCM_Other;
case CC_X86FastCall:
- return SOF_FAST;
+ return CCM_Fast;
case CC_X86StdCall:
- return SOF_STD;
+ return CCM_Std;
+ case CC_X86VectorCall:
+ return CCM_Vector;
}
}
bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
const ASTContext &ASTContext = getASTContext();
- StdOrFastCC CC = getStdOrFastCallMangling(ASTContext, D);
- if (CC != SOF_OTHER)
+ CCMangling CC = getCallingConvMangling(ASTContext, D);
+ if (CC != CCM_Other)
return true;
// In C, functions with no attributes never need to be mangled. Fastpath them.
@@ -131,28 +136,35 @@ void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
}
const ASTContext &ASTContext = getASTContext();
- StdOrFastCC CC = getStdOrFastCallMangling(ASTContext, D);
+ CCMangling CC = getCallingConvMangling(ASTContext, D);
bool MCXX = shouldMangleCXXName(D);
const TargetInfo &TI = Context.getTargetInfo();
- if (CC == SOF_OTHER || (MCXX && TI.getCXXABI() == TargetCXXABI::Microsoft)) {
- mangleCXXName(D, Out);
+ if (CC == CCM_Other || (MCXX && TI.getCXXABI() == TargetCXXABI::Microsoft)) {
+ if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
+ mangleObjCMethodName(OMD, Out);
+ else
+ mangleCXXName(D, Out);
return;
}
Out << '\01';
- if (CC == SOF_STD)
+ if (CC == CCM_Std)
Out << '_';
- else
+ else if (CC == CCM_Fast)
Out << '@';
if (!MCXX)
Out << D->getIdentifier()->getName();
+ else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
+ mangleObjCMethodName(OMD, Out);
else
mangleCXXName(D, Out);
const FunctionDecl *FD = cast<FunctionDecl>(D);
const FunctionType *FT = FD->getType()->castAs<FunctionType>();
const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT);
+ if (CC == CCM_Vector)
+ Out << '@';
Out << '@';
if (!Proto) {
Out << '0';
@@ -164,9 +176,11 @@ void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
if (!MD->isStatic())
++ArgWords;
for (const auto &AT : Proto->param_types())
- // Size should be aligned to DWORD boundary
- ArgWords += llvm::RoundUpToAlignment(ASTContext.getTypeSize(AT), 32) / 32;
- Out << 4 * ArgWords;
+ // Size should be aligned to pointer size.
+ ArgWords += llvm::RoundUpToAlignment(ASTContext.getTypeSize(AT),
+ TI.getPointerWidth(0)) /
+ TI.getPointerWidth(0);
+ Out << ((TI.getPointerWidth(0) / 8) * ArgWords);
}
void MangleContext::mangleGlobalBlock(const BlockDecl *BD,
@@ -215,16 +229,28 @@ void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
mangleObjCMethodName(Method, Stream);
} else {
- const NamedDecl *ND = cast<NamedDecl>(DC);
- if (!shouldMangleDeclName(ND) && ND->getIdentifier())
- Stream << ND->getIdentifier()->getName();
- else {
- // FIXME: We were doing a mangleUnqualifiedName() before, but that's
- // a private member of a class that will soon itself be private to the
- // Itanium C++ ABI object. What should we do now? Right now, I'm just
- // calling the mangleName() method on the MangleContext; is there a
- // better way?
- mangleName(ND, Stream);
+ assert((isa<NamedDecl>(DC) || isa<BlockDecl>(DC)) &&
+ "expected a NamedDecl or BlockDecl");
+ if (isa<BlockDecl>(DC))
+ for (; DC && isa<BlockDecl>(DC); DC = DC->getParent())
+ (void) getBlockId(cast<BlockDecl>(DC), true);
+ assert((isa<TranslationUnitDecl>(DC) || isa<NamedDecl>(DC)) &&
+ "expected a TranslationUnitDecl or a NamedDecl");
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
+ mangleCtorBlock(CD, /*CT*/ Ctor_Complete, BD, Out);
+ else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
+ mangleDtorBlock(DD, /*DT*/ Dtor_Complete, BD, Out);
+ else if (auto ND = dyn_cast<NamedDecl>(DC)) {
+ if (!shouldMangleDeclName(ND) && ND->getIdentifier())
+ Stream << ND->getIdentifier()->getName();
+ else {
+ // FIXME: We were doing a mangleUnqualifiedName() before, but that's
+ // a private member of a class that will soon itself be private to the
+ // Itanium C++ ABI object. What should we do now? Right now, I'm just
+ // calling the mangleName() method on the MangleContext; is there a
+ // better way?
+ mangleName(ND, Stream);
+ }
}
}
Stream.flush();
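
The stdcall/fastcall/vectorcall decoration above now also runs on x86_64, the
per-argument size is rounded to the pointer width instead of a hard-coded 32
bits, and __vectorcall emits a second '@' before the byte count. A tiny sketch
of just the byte-count arithmetic for a hypothetical f(int, double) on a
64-bit target; the sizes and the printed suffix are worked out from the code
above, not taken from a compiler run.

    #include <cstdio>

    int main() {
      const unsigned PtrBits = 64;        // TI.getPointerWidth(0) on x86_64
      const unsigned ArgBits[] = {32 /*int*/, 64 /*double*/};
      unsigned ArgWords = 0;
      for (unsigned Bits : ArgBits)       // RoundUpToAlignment(Bits, PtrBits) / PtrBits
        ArgWords += (Bits + PtrBits - 1) / PtrBits;
      // __vectorcall writes "@@" before the count; stdcall/fastcall write '@'.
      std::printf("@@%u\n", (PtrBits / 8) * ArgWords);   // "@@16"
    }
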
diff --git a/lib/AST/MangleNumberingContext.cpp b/lib/AST/MangleNumberingContext.cpp
deleted file mode 100644
index 5f40f0347d0e..000000000000
--- a/lib/AST/MangleNumberingContext.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-//===--- MangleNumberingContext.cpp - Context for mangling numbers --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the LambdaMangleContext class, which keeps track of
-// the Itanium C++ ABI mangling numbers for lambda expressions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/AST/MangleNumberingContext.h"
-#include "clang/AST/ASTContext.h"
-#include "clang/AST/DeclCXX.h"
-
-using namespace clang;
-
-unsigned
-MangleNumberingContext::getManglingNumber(const CXXMethodDecl *CallOperator) {
- const FunctionProtoType *Proto
- = CallOperator->getType()->getAs<FunctionProtoType>();
- ASTContext &Context = CallOperator->getASTContext();
-
- QualType Key = Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(),
- FunctionProtoType::ExtProtoInfo());
- Key = Context.getCanonicalType(Key);
- return ++ManglingNumbers[Key->castAs<FunctionProtoType>()];
-}
-
-unsigned
-MangleNumberingContext::getManglingNumber(const BlockDecl *BD) {
- // FIXME: Compute a BlockPointerType? Not obvious how.
- const Type *Ty = nullptr;
- return ++ManglingNumbers[Ty];
-}
-
-unsigned
-MangleNumberingContext::getStaticLocalNumber(const VarDecl *VD) {
- // FIXME: Compute a BlockPointerType? Not obvious how.
- const Type *Ty = nullptr;
- return ++ManglingNumbers[Ty];
-}
diff --git a/lib/AST/MicrosoftCXXABI.cpp b/lib/AST/MicrosoftCXXABI.cpp
index 6870315b2160..0603d3b7b9b5 100644
--- a/lib/AST/MicrosoftCXXABI.cpp
+++ b/lib/AST/MicrosoftCXXABI.cpp
@@ -28,7 +28,28 @@ namespace {
/// \brief Numbers things which need to correspond across multiple TUs.
/// Typically these are things like static locals, lambdas, or blocks.
class MicrosoftNumberingContext : public MangleNumberingContext {
+ llvm::DenseMap<const Type *, unsigned> ManglingNumbers;
+ unsigned LambdaManglingNumber;
+ unsigned StaticLocalNumber;
+
public:
+ MicrosoftNumberingContext()
+ : MangleNumberingContext(), LambdaManglingNumber(0),
+ StaticLocalNumber(0) {}
+
+ unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override {
+ return ++LambdaManglingNumber;
+ }
+
+ unsigned getManglingNumber(const BlockDecl *BD) override {
+ const Type *Ty = nullptr;
+ return ++ManglingNumbers[Ty];
+ }
+
+ unsigned getStaticLocalNumber(const VarDecl *VD) override {
+ return ++StaticLocalNumber;
+ }
+
unsigned getManglingNumber(const VarDecl *VD,
unsigned MSLocalManglingNumber) override {
return MSLocalManglingNumber;
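
What the new MicrosoftNumberingContext counters have to keep stable across
translation units, sketched at the source level (the names are placeholders;
the numbers in the comments just trace the counters incremented above):

    inline int tally(bool which) {
      static int hits = 0;                       // getStaticLocalNumber -> 1
      static int misses = 0;                     // getStaticLocalNumber -> 2
      auto inc = [](int &v) { return ++v; };     // lambda call operator -> 1
      auto dec = [](int &v) { return --v; };     // lambda call operator -> 2
      return which ? inc(hits) : dec(misses);
    }
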
diff --git a/lib/AST/MicrosoftMangle.cpp b/lib/AST/MicrosoftMangle.cpp
index e6a6d09b4845..72f90f67cbdf 100644
--- a/lib/AST/MicrosoftMangle.cpp
+++ b/lib/AST/MicrosoftMangle.cpp
@@ -27,7 +27,6 @@
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/Support/MathExtras.h"
using namespace clang;
@@ -161,7 +160,7 @@ public:
unsigned &discriminator = Uniquifier[ND];
if (!discriminator)
discriminator = ++Discriminator[std::make_pair(DC, ND->getIdentifier())];
- disc = discriminator;
+ disc = discriminator + 1;
return true;
}
@@ -191,8 +190,8 @@ class MicrosoftCXXNameMangler {
const NamedDecl *Structor;
unsigned StructorType;
- typedef llvm::StringMap<unsigned> BackRefMap;
- BackRefMap NameBackReferences;
+ typedef llvm::SmallVector<std::string, 10> BackRefVec;
+ BackRefVec NameBackReferences;
typedef llvm::DenseMap<void *, unsigned> ArgBackRefMap;
ArgBackRefMap TypeBackReferences;
@@ -234,7 +233,7 @@ public:
QualifierMangleMode QMM = QMM_Mangle);
void mangleFunctionType(const FunctionType *T,
const FunctionDecl *D = nullptr,
- bool ForceInstMethod = false);
+ bool ForceThisQuals = false);
void mangleNestedName(const NamedDecl *ND);
private:
@@ -279,7 +278,8 @@ private:
void mangleTemplateArgs(const TemplateDecl *TD,
const TemplateArgumentList &TemplateArgs);
- void mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA);
+ void mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA,
+ const NamedDecl *Parm);
};
}
@@ -338,9 +338,7 @@ bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
bool
MicrosoftMangleContextImpl::shouldMangleStringLiteral(const StringLiteral *SL) {
- return SL->isAscii() || SL->isWide();
- // TODO: This needs to be updated when MSVC gains support for Unicode
- // literals.
+ return true;
}
void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
@@ -441,7 +439,7 @@ void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
mangleQualifiers(Ty.getQualifiers(), false);
} else {
mangleType(Ty, SR, QMM_Drop);
- mangleQualifiers(Ty.getLocalQualifiers(), false);
+ mangleQualifiers(Ty.getQualifiers(), false);
}
}
@@ -801,10 +799,7 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
// <postfix> ::= <unqualified-name> [<postfix>]
// ::= <substitution> [<postfix>]
- if (isLambda(ND))
- return;
-
- const DeclContext *DC = ND->getDeclContext();
+ const DeclContext *DC = getEffectiveDeclContext(ND);
while (!DC->isTranslationUnit()) {
if (isa<TagDecl>(ND) || isa<VarDecl>(ND)) {
@@ -856,6 +851,8 @@ void MicrosoftCXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
// <operator-name> ::= ?_E # vector deleting destructor
// FIXME: Add a vector deleting dtor type. It goes in the vtable, so we need
// it.
+ case Dtor_Comdat:
+ llvm_unreachable("not expecting a COMDAT");
}
llvm_unreachable("Unsupported dtor type?");
}
@@ -994,22 +991,14 @@ void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO,
void MicrosoftCXXNameMangler::mangleSourceName(StringRef Name) {
// <source name> ::= <identifier> @
- BackRefMap::iterator Found;
- if (NameBackReferences.size() < 10) {
- size_t Size = NameBackReferences.size();
- bool Inserted;
- std::tie(Found, Inserted) =
- NameBackReferences.insert(std::make_pair(Name, Size));
- if (Inserted)
- Found = NameBackReferences.end();
- } else {
- Found = NameBackReferences.find(Name);
- }
-
+ BackRefVec::iterator Found =
+ std::find(NameBackReferences.begin(), NameBackReferences.end(), Name);
if (Found == NameBackReferences.end()) {
+ if (NameBackReferences.size() < 10)
+ NameBackReferences.push_back(Name);
Out << Name << '@';
} else {
- Out << Found->second;
+ Out << (Found - NameBackReferences.begin());
}
}
@@ -1025,7 +1014,7 @@ void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
// Templates have their own context for back references.
ArgBackRefMap OuterArgsContext;
- BackRefMap OuterTemplateContext;
+ BackRefVec OuterTemplateContext;
NameBackReferences.swap(OuterTemplateContext);
TypeBackReferences.swap(OuterArgsContext);
@@ -1051,8 +1040,10 @@ void MicrosoftCXXNameMangler::mangleIntegerLiteral(const llvm::APSInt &Value,
// Make sure booleans are encoded as 0/1.
if (IsBoolean && Value.getBoolValue())
mangleNumber(1);
- else
+ else if (Value.isSigned())
mangleNumber(Value.getSExtValue());
+ else
+ mangleNumber(Value.getZExtValue());
}
void MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
@@ -1104,12 +1095,18 @@ void MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
void MicrosoftCXXNameMangler::mangleTemplateArgs(
const TemplateDecl *TD, const TemplateArgumentList &TemplateArgs) {
// <template-args> ::= <template-arg>+
+ const TemplateParameterList *TPL = TD->getTemplateParameters();
+ assert(TPL->size() == TemplateArgs.size() &&
+ "size mismatch between args and parms!");
+
+ unsigned Idx = 0;
for (const TemplateArgument &TA : TemplateArgs.asArray())
- mangleTemplateArg(TD, TA);
+ mangleTemplateArg(TD, TA, TPL->getParam(Idx++));
}
void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
- const TemplateArgument &TA) {
+ const TemplateArgument &TA,
+ const NamedDecl *Parm) {
// <template-arg> ::= <type>
// ::= <integer-literal>
// ::= <member-data-pointer>
@@ -1142,7 +1139,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
else
mangle(FD, "$1?");
} else {
- mangle(ND, TA.isDeclForReferenceParam() ? "$E?" : "$1?");
+ mangle(ND, TA.getParamTypeForDecl()->isReferenceType() ? "$E?" : "$1?");
}
break;
}
@@ -1172,18 +1169,33 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
case TemplateArgument::Pack: {
ArrayRef<TemplateArgument> TemplateArgs = TA.getPackAsArray();
if (TemplateArgs.empty()) {
- Out << "$S";
+ if (isa<TemplateTypeParmDecl>(Parm) ||
+ isa<TemplateTemplateParmDecl>(Parm))
+ Out << "$$V";
+ else if (isa<NonTypeTemplateParmDecl>(Parm))
+ Out << "$S";
+ else
+ llvm_unreachable("unexpected template parameter decl!");
} else {
for (const TemplateArgument &PA : TemplateArgs)
- mangleTemplateArg(TD, PA);
+ mangleTemplateArg(TD, PA, Parm);
}
break;
}
- case TemplateArgument::Template:
- mangleType(cast<TagDecl>(
- TA.getAsTemplate().getAsTemplateDecl()->getTemplatedDecl()));
+ case TemplateArgument::Template: {
+ const NamedDecl *ND =
+ TA.getAsTemplate().getAsTemplateDecl()->getTemplatedDecl();
+ if (const auto *TD = dyn_cast<TagDecl>(ND)) {
+ mangleType(TD);
+ } else if (isa<TypeAliasDecl>(ND)) {
+ Out << "$$Y";
+ mangleName(ND);
+ } else {
+ llvm_unreachable("unexpected template template NamedDecl!");
+ }
break;
}
+ }
}
void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
@@ -1473,6 +1485,8 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T,
case BuiltinType::Int128: Out << "_L"; break;
case BuiltinType::UInt128: Out << "_M"; break;
case BuiltinType::Bool: Out << "_N"; break;
+ case BuiltinType::Char16: Out << "_S"; break;
+ case BuiltinType::Char32: Out << "_U"; break;
case BuiltinType::WChar_S:
case BuiltinType::WChar_U: Out << "_W"; break;
@@ -1498,8 +1512,6 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T,
case BuiltinType::NullPtr: Out << "$$T"; break;
- case BuiltinType::Char16:
- case BuiltinType::Char32:
case BuiltinType::Half: {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
@@ -1518,8 +1530,13 @@ void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T,
// Structors only appear in decls, so at this point we know it's not a
// structor type.
// FIXME: This may not be lambda-friendly.
- Out << "$$A6";
- mangleFunctionType(T);
+ if (T->getTypeQuals() || T->getRefQualifier() != RQ_None) {
+ Out << "$$A8@@";
+ mangleFunctionType(T, /*D=*/nullptr, /*ForceThisQuals=*/true);
+ } else {
+ Out << "$$A6";
+ mangleFunctionType(T);
+ }
}
void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T,
SourceRange) {
@@ -1528,7 +1545,7 @@ void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T,
void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
const FunctionDecl *D,
- bool ForceInstMethod) {
+ bool ForceThisQuals) {
// <function-type> ::= <this-cvr-qualifiers> <calling-convention>
// <return-type> <argument-list> <throw-spec>
const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
@@ -1536,21 +1553,21 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
SourceRange Range;
if (D) Range = D->getSourceRange();
- bool IsStructor = false, IsInstMethod = ForceInstMethod;
+ bool IsStructor = false, HasThisQuals = ForceThisQuals;
if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(D)) {
if (MD->isInstance())
- IsInstMethod = true;
+ HasThisQuals = true;
if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))
IsStructor = true;
}
// If this is a C++ instance method, mangle the CVR qualifiers for the
// this pointer.
- if (IsInstMethod) {
+ if (HasThisQuals) {
Qualifiers Quals = Qualifiers::fromCVRMask(Proto->getTypeQuals());
- manglePointerExtQualifiers(Quals, nullptr);
+ manglePointerExtQualifiers(Quals, /*PointeeType=*/nullptr);
mangleRefQualifier(Proto->getRefQualifier());
- mangleQualifiers(Quals, false);
+ mangleQualifiers(Quals, /*IsMember=*/false);
}
mangleCallingConvention(T);
@@ -1670,6 +1687,7 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
// ::= H # __export __stdcall
// ::= I # __fastcall
// ::= J # __export __fastcall
+ // ::= Q # __vectorcall
// The 'export' calling conventions are from a bygone era
// (*cough*Win16*cough*) when functions were declared for export with
// that keyword. (It didn't actually export them, it just made them so
@@ -1686,6 +1704,7 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
case CC_X86ThisCall: Out << 'E'; break;
case CC_X86StdCall: Out << 'G'; break;
case CC_X86FastCall: Out << 'I'; break;
+ case CC_X86VectorCall: Out << 'Q'; break;
}
}
void MicrosoftCXXNameMangler::mangleThrowSpecification(
@@ -2331,7 +2350,7 @@ void MicrosoftMangleContextImpl::mangleReferenceTemporary(const VarDecl *VD,
void MicrosoftMangleContextImpl::mangleStaticGuardVariable(const VarDecl *VD,
raw_ostream &Out) {
- // TODO: This is not correct, especially with respect to MSVC2013. MSVC2013
+ // TODO: This is not correct, especially with respect to VS "14". VS "14"
// utilizes thread local variables to implement thread safe, re-entrant
// initialization for statics. They no longer differentiate between an
// externally visible and non-externally visible static with respect to
@@ -2420,14 +2439,10 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
Mangler.getStream() << "\01??_C@_";
// <char-type>: The "kind" of string literal is encoded into the mangled name.
- // TODO: This needs to be updated when MSVC gains support for unicode
- // literals.
- if (SL->isAscii())
- Mangler.getStream() << '0';
- else if (SL->isWide())
+ if (SL->isWide())
Mangler.getStream() << '1';
else
- llvm_unreachable("unexpected string literal kind!");
+ Mangler.getStream() << '0';
// <literal-length>: The next part of the mangled name consists of the length
// of the string.
@@ -2506,42 +2521,16 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
} else if (isLetter(Byte & 0x7f)) {
Mangler.getStream() << '?' << static_cast<char>(Byte & 0x7f);
} else {
- switch (Byte) {
- case ',':
- Mangler.getStream() << "?0";
- break;
- case '/':
- Mangler.getStream() << "?1";
- break;
- case '\\':
- Mangler.getStream() << "?2";
- break;
- case ':':
- Mangler.getStream() << "?3";
- break;
- case '.':
- Mangler.getStream() << "?4";
- break;
- case ' ':
- Mangler.getStream() << "?5";
- break;
- case '\n':
- Mangler.getStream() << "?6";
- break;
- case '\t':
- Mangler.getStream() << "?7";
- break;
- case '\'':
- Mangler.getStream() << "?8";
- break;
- case '-':
- Mangler.getStream() << "?9";
- break;
- default:
- Mangler.getStream() << "?$";
- Mangler.getStream() << static_cast<char>('A' + ((Byte >> 4) & 0xf));
- Mangler.getStream() << static_cast<char>('A' + (Byte & 0xf));
- break;
+ const char SpecialChars[] = {',', '/', '\\', ':', '.',
+ ' ', '\n', '\t', '\'', '-'};
+ const char *Pos =
+ std::find(std::begin(SpecialChars), std::end(SpecialChars), Byte);
+ if (Pos != std::end(SpecialChars)) {
+ Mangler.getStream() << '?' << (Pos - std::begin(SpecialChars));
+ } else {
+ Mangler.getStream() << "?$";
+ Mangler.getStream() << static_cast<char>('A' + ((Byte >> 4) & 0xf));
+ Mangler.getStream() << static_cast<char>('A' + (Byte & 0xf));
}
}
};
@@ -2550,7 +2539,10 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
unsigned NumCharsToMangle = std::min(32U, SL->getLength());
for (unsigned I = 0, E = NumCharsToMangle * SL->getCharByteWidth(); I != E;
++I)
- MangleByte(GetBigEndianByte(I));
+ if (SL->isWide())
+ MangleByte(GetBigEndianByte(I));
+ else
+ MangleByte(GetLittleEndianByte(I));
// Encode the NUL terminator if there is room.
if (NumCharsToMangle < 32)
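
Source forms that the MicrosoftMangle changes above start handling. The letter
codes in the comments come straight from the tables in the hunks; the
declarations themselves are placeholders and need an MSVC-compatible target
(or -fms-extensions) to build.

    char16_t c16 = u'x';                 // BuiltinType::Char16 -> "_S"
    char32_t c32 = U'x';                 // BuiltinType::Char32 -> "_U"
    void __vectorcall vc(float, float);  // CC_X86VectorCall    -> 'Q'
    const char16_t *greeting() { return u"hi"; }  // non-wide literal kind '0'
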
diff --git a/lib/AST/NSAPI.cpp b/lib/AST/NSAPI.cpp
index 986b3b53983d..3dc750a81787 100644
--- a/lib/AST/NSAPI.cpp
+++ b/lib/AST/NSAPI.cpp
@@ -10,6 +10,7 @@
#include "clang/AST/NSAPI.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
+#include "llvm/ADT/StringSwitch.h"
using namespace clang;
@@ -46,6 +47,10 @@ Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const {
Sel = Ctx.Selectors.getUnarySelector(
&Ctx.Idents.get("stringWithUTF8String"));
break;
+ case NSStr_initWithUTF8String:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("initWithUTF8String"));
+ break;
case NSStr_stringWithCStringEncoding: {
IdentifierInfo *KeyIdents[] = {
&Ctx.Idents.get("stringWithCString"),
@@ -379,6 +384,32 @@ bool NSAPI::isObjCNSUIntegerType(QualType T) const {
return isObjCTypedef(T, "NSUInteger", NSUIntegerId);
}
+StringRef NSAPI::GetNSIntegralKind(QualType T) const {
+ if (!Ctx.getLangOpts().ObjC1 || T.isNull())
+ return StringRef();
+
+ while (const TypedefType *TDT = T->getAs<TypedefType>()) {
+    StringRef NSIntegralResult =
+ llvm::StringSwitch<StringRef>(
+ TDT->getDecl()->getDeclName().getAsIdentifierInfo()->getName())
+ .Case("int8_t", "int8_t")
+ .Case("int16_t", "int16_t")
+ .Case("int32_t", "int32_t")
+ .Case("NSInteger", "NSInteger")
+ .Case("int64_t", "int64_t")
+ .Case("uint8_t", "uint8_t")
+ .Case("uint16_t", "uint16_t")
+ .Case("uint32_t", "uint32_t")
+ .Case("NSUInteger", "NSUInteger")
+ .Case("uint64_t", "uint64_t")
+ .Default(StringRef());
+    if (!NSIntegralResult.empty())
+      return NSIntegralResult;
+ T = TDT->desugar();
+ }
+ return StringRef();
+}
+
bool NSAPI::isObjCTypedef(QualType T,
StringRef name, IdentifierInfo *&II) const {
if (!Ctx.getLangOpts().ObjC1)
diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp
index 1f041aa49542..50a00502ca9f 100644
--- a/lib/AST/NestedNameSpecifier.cpp
+++ b/lib/AST/NestedNameSpecifier.cpp
@@ -66,7 +66,7 @@ NestedNameSpecifier::Create(const ASTContext &Context,
"Broken nested name specifier");
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(Prefix);
- Mockup.Prefix.setInt(StoredNamespaceOrAlias);
+ Mockup.Prefix.setInt(StoredDecl);
Mockup.Specifier = const_cast<NamespaceDecl *>(NS);
return FindOrInsert(Context, Mockup);
}
@@ -82,7 +82,7 @@ NestedNameSpecifier::Create(const ASTContext &Context,
"Broken nested name specifier");
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(Prefix);
- Mockup.Prefix.setInt(StoredNamespaceOrAlias);
+ Mockup.Prefix.setInt(StoredDecl);
Mockup.Specifier = Alias;
return FindOrInsert(Context, Mockup);
}
@@ -118,6 +118,16 @@ NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) {
return Context.GlobalNestedNameSpecifier;
}
+NestedNameSpecifier *
+NestedNameSpecifier::SuperSpecifier(const ASTContext &Context,
+ CXXRecordDecl *RD) {
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(nullptr);
+ Mockup.Prefix.setInt(StoredDecl);
+ Mockup.Specifier = RD;
+ return FindOrInsert(Context, Mockup);
+}
+
NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
if (!Specifier)
return Global;
@@ -126,9 +136,12 @@ NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
case StoredIdentifier:
return Identifier;
- case StoredNamespaceOrAlias:
- return isa<NamespaceDecl>(static_cast<NamedDecl *>(Specifier))? Namespace
- : NamespaceAlias;
+ case StoredDecl: {
+ NamedDecl *ND = static_cast<NamedDecl *>(Specifier);
+ if (isa<CXXRecordDecl>(ND))
+ return Super;
+ return isa<NamespaceDecl>(ND) ? Namespace : NamespaceAlias;
+ }
case StoredTypeSpec:
return TypeSpec;
@@ -140,24 +153,29 @@ NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
llvm_unreachable("Invalid NNS Kind!");
}
-/// \brief Retrieve the namespace stored in this nested name
-/// specifier.
+/// \brief Retrieve the namespace stored in this nested name specifier.
NamespaceDecl *NestedNameSpecifier::getAsNamespace() const {
- if (Prefix.getInt() == StoredNamespaceOrAlias)
+ if (Prefix.getInt() == StoredDecl)
return dyn_cast<NamespaceDecl>(static_cast<NamedDecl *>(Specifier));
return nullptr;
}
-/// \brief Retrieve the namespace alias stored in this nested name
-/// specifier.
+/// \brief Retrieve the namespace alias stored in this nested name specifier.
NamespaceAliasDecl *NestedNameSpecifier::getAsNamespaceAlias() const {
- if (Prefix.getInt() == StoredNamespaceOrAlias)
+ if (Prefix.getInt() == StoredDecl)
return dyn_cast<NamespaceAliasDecl>(static_cast<NamedDecl *>(Specifier));
return nullptr;
}
+/// \brief Retrieve the record declaration stored in this nested name specifier.
+CXXRecordDecl *NestedNameSpecifier::getAsRecordDecl() const {
+ if (Prefix.getInt() == StoredDecl)
+ return dyn_cast<CXXRecordDecl>(static_cast<NamedDecl *>(Specifier));
+
+ return nullptr;
+}
/// \brief Whether this nested name specifier refers to a dependent
/// type or not.
@@ -172,6 +190,15 @@ bool NestedNameSpecifier::isDependent() const {
case Global:
return false;
+ case Super: {
+ CXXRecordDecl *RD = static_cast<CXXRecordDecl *>(Specifier);
+ for (const auto &Base : RD->bases())
+ if (Base.getType()->isDependentType())
+ return true;
+
+ return false;
+ }
+
case TypeSpec:
case TypeSpecWithTemplate:
return getAsType()->isDependentType();
@@ -191,8 +218,9 @@ bool NestedNameSpecifier::isInstantiationDependent() const {
case Namespace:
case NamespaceAlias:
case Global:
+ case Super:
return false;
-
+
case TypeSpec:
case TypeSpecWithTemplate:
return getAsType()->isInstantiationDependentType();
@@ -209,6 +237,7 @@ bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
case Namespace:
case NamespaceAlias:
case Global:
+ case Super:
return false;
case TypeSpec:
@@ -246,6 +275,10 @@ NestedNameSpecifier::print(raw_ostream &OS,
case Global:
break;
+ case Super:
+ OS << "__super";
+ break;
+
case TypeSpecWithTemplate:
OS << "template ";
// Fall through to print the type.
@@ -304,6 +337,7 @@ NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier *Qualifier) {
case NestedNameSpecifier::Identifier:
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Super:
// The location of the identifier or namespace name.
Length += sizeof(unsigned);
break;
@@ -369,6 +403,7 @@ SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
case NestedNameSpecifier::Identifier:
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Super:
return SourceRange(LoadSourceLocation(Data, Offset),
LoadSourceLocation(Data, Offset + sizeof(unsigned)));
@@ -552,6 +587,17 @@ void NestedNameSpecifierLocBuilder::MakeGlobal(ASTContext &Context,
SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
}
+void NestedNameSpecifierLocBuilder::MakeSuper(ASTContext &Context,
+ CXXRecordDecl *RD,
+ SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::SuperSpecifier(Context, RD);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(SuperLoc, Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
void NestedNameSpecifierLocBuilder::MakeTrivial(ASTContext &Context,
NestedNameSpecifier *Qualifier,
SourceRange R) {
@@ -583,6 +629,7 @@ void NestedNameSpecifierLocBuilder::MakeTrivial(ASTContext &Context,
}
case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
break;
}
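
The new Super specifier kind models the Microsoft __super extension at the
source level; a minimal example (names are placeholders, and the code needs
-fms-extensions to parse):

    struct Base {
      void detach();
    };

    struct Derived : Base {
      void detach() {
        __super::detach();   // printed back as "__super::" by the Super case
      }
    };
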
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index b3deebaecfab..0d070a4bfbfc 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -636,23 +636,12 @@ protected:
HasOwnVFPtr(false),
FirstNearlyEmptyVBase(nullptr) {}
- /// Reset this RecordLayoutBuilder to a fresh state, using the given
- /// alignment as the initial alignment. This is used for the
- /// correct layout of vb-table pointers in MSVC.
- void resetWithTargetAlignment(CharUnits TargetAlignment) {
- const ASTContext &Context = this->Context;
- EmptySubobjectMap *EmptySubobjects = this->EmptySubobjects;
- this->~RecordLayoutBuilder();
- new (this) RecordLayoutBuilder(Context, EmptySubobjects);
- Alignment = UnpackedAlignment = TargetAlignment;
- }
-
void Layout(const RecordDecl *D);
void Layout(const CXXRecordDecl *D);
void Layout(const ObjCInterfaceDecl *D);
void LayoutFields(const RecordDecl *D);
- void LayoutField(const FieldDecl *D);
+ void LayoutField(const FieldDecl *D, bool InsertExtraPadding);
void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize,
bool FieldPacked, const FieldDecl *D);
void LayoutBitField(const FieldDecl *D);
@@ -1104,7 +1093,7 @@ RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
// Only lay out the virtual base if it's not an indirect primary base.
if (!IndirectPrimaryBase) {
// Only visit virtual bases once.
- if (!VisitedVirtualBases.insert(BaseDecl))
+ if (!VisitedVirtualBases.insert(BaseDecl).second)
continue;
const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
@@ -1331,7 +1320,7 @@ void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
// Layout each ivar sequentially.
for (const ObjCIvarDecl *IVD = D->all_declared_ivar_begin(); IVD;
IVD = IVD->getNextIvar())
- LayoutField(IVD);
+ LayoutField(IVD, false);
// Finally, round the size of the total struct up to the alignment of the
// struct itself.
@@ -1341,8 +1330,22 @@ void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
// Layout each field, for now, just sequentially, respecting alignment. In
// the future, this will need to be tweakable by targets.
- for (const auto *Field : D->fields())
- LayoutField(Field);
+ bool InsertExtraPadding = D->mayInsertExtraPadding(/*EmitRemark=*/true);
+ bool HasFlexibleArrayMember = D->hasFlexibleArrayMember();
+ for (auto I = D->field_begin(), End = D->field_end(); I != End; ++I) {
+ auto Next(I);
+ ++Next;
+ LayoutField(*I,
+ InsertExtraPadding && (Next != End || !HasFlexibleArrayMember));
+ }
+}
+
+// Rounds the specified size up to a multiple of the char size.
+static uint64_t
+roundUpSizeToCharAlignment(uint64_t Size,
+ const ASTContext &Context) {
+ uint64_t CharAlignment = Context.getTargetInfo().getCharAlign();
+ return llvm::RoundUpToAlignment(Size, CharAlignment);
}
void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
@@ -1382,7 +1385,9 @@ void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit;
if (IsUnion) {
- setDataSize(std::max(getDataSizeInBits(), FieldSize));
+ uint64_t RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize,
+ Context);
+ setDataSize(std::max(getDataSizeInBits(), RoundedFieldSize));
FieldOffset = 0;
} else {
// The bitfield is allocated starting at the next offset aligned
@@ -1413,9 +1418,9 @@ void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
uint64_t FieldSize = D->getBitWidthValue(Context);
- std::pair<uint64_t, unsigned> FieldInfo = Context.getTypeInfo(D->getType());
- uint64_t TypeSize = FieldInfo.first;
- unsigned FieldAlign = FieldInfo.second;
+ TypeInfo FieldInfo = Context.getTypeInfo(D->getType());
+ uint64_t TypeSize = FieldInfo.Width;
+ unsigned FieldAlign = FieldInfo.Align;
// UnfilledBitsInLastUnit is the difference between the end of the
// last allocated bitfield (i.e. the first bit offset available for
@@ -1607,9 +1612,9 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// For unions, this is just a max operation, as usual.
if (IsUnion) {
- // FIXME: I think FieldSize should be TypeSize here.
- setDataSize(std::max(getDataSizeInBits(), FieldSize));
-
+ uint64_t RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize,
+ Context);
+ setDataSize(std::max(getDataSizeInBits(), RoundedFieldSize));
// For non-zero-width bitfields in ms_struct structs, allocate a new
// storage unit if necessary.
} else if (IsMsStruct && FieldSize) {
@@ -1645,7 +1650,8 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
Context.toCharUnitsFromBits(UnpackedFieldAlign));
}
-void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
+void RecordLayoutBuilder::LayoutField(const FieldDecl *D,
+ bool InsertExtraPadding) {
if (D->isBitField()) {
LayoutBitField(D);
return;
@@ -1749,6 +1755,15 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
Context.toBits(UnpackedFieldOffset),
Context.toBits(UnpackedFieldAlign), FieldPacked, D);
+ if (InsertExtraPadding) {
+ CharUnits ASanAlignment = CharUnits::fromQuantity(8);
+ CharUnits ExtraSizeForAsan = ASanAlignment;
+ if (FieldSize % ASanAlignment)
+ ExtraSizeForAsan +=
+ ASanAlignment - CharUnits::fromQuantity(FieldSize % ASanAlignment);
+ FieldSize += ExtraSizeForAsan;
+ }
+
// Reserve space for this field.
uint64_t FieldSizeInBits = Context.toBits(FieldSize);
if (IsUnion)
@@ -2097,7 +2112,7 @@ static bool isMsLayout(const RecordDecl* D) {
// * There is a distinction between alignment and required alignment.
// __declspec(align) changes the required alignment of a struct. This
// alignment is _always_ obeyed, even in the presence of #pragma pack. A
-// record inherites required alignment from all of its fields an bases.
+// record inherits required alignment from all of its fields and bases.
// * __declspec(align) on bitfields has the effect of changing the bitfield's
// alignment instead of its required alignment. This is the only known way
// to make the alignment of a struct bigger than 8. Interestingly enough
@@ -2181,8 +2196,9 @@ public:
FieldOffsets.push_back(FieldOffset);
}
/// \brief Compute the set of virtual bases for which vtordisps are required.
- llvm::SmallPtrSet<const CXXRecordDecl *, 2>
- computeVtorDispSet(const CXXRecordDecl *RD);
+ void computeVtorDispSet(
+ llvm::SmallPtrSetImpl<const CXXRecordDecl *> &HasVtorDispSet,
+ const CXXRecordDecl *RD) const;
const ASTContext &Context;
/// \brief The size of the record being laid out.
CharUnits Size;
@@ -2203,6 +2219,8 @@ public:
CharUnits CurrentBitfieldSize;
/// \brief Offset to the virtual base table pointer (if one exists).
CharUnits VBPtrOffset;
+ /// \brief Minimum record size possible.
+ CharUnits MinEmptyStructSize;
/// \brief The size and alignment info of a pointer.
ElementInfo PointerInfo;
/// \brief The primary base class (if one exists).
@@ -2260,12 +2278,18 @@ MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
MicrosoftRecordLayoutBuilder::ElementInfo
MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
const FieldDecl *FD) {
+  // Get the field type's natural alignment, ignoring any alignment
+  // attributes.
ElementInfo Info;
std::tie(Info.Size, Info.Alignment) =
- Context.getTypeInfoInChars(FD->getType());
- // Respect align attributes.
- CharUnits FieldRequiredAlignment =
+ Context.getTypeInfoInChars(FD->getType()->getUnqualifiedDesugaredType());
+ // Respect align attributes on the field.
+ CharUnits FieldRequiredAlignment =
Context.toCharUnitsFromBits(FD->getMaxAlignment());
+ // Respect align attributes on the type.
+ if (Context.isAlignmentRequired(FD->getType()))
+ FieldRequiredAlignment = std::max(
+ Context.getTypeAlignInChars(FD->getType()), FieldRequiredAlignment);
// Respect attributes applied to subobjects of the field.
if (FD->isBitField())
// For some reason __declspec align impacts alignment rather than required
@@ -2292,6 +2316,8 @@ MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
}
void MicrosoftRecordLayoutBuilder::layout(const RecordDecl *RD) {
+ // For C record layout, zero-sized records always have size 4.
+ MinEmptyStructSize = CharUnits::fromQuantity(4);
initializeLayout(RD);
layoutFields(RD);
DataSize = Size = Size.RoundUpToAlignment(Alignment);
@@ -2301,6 +2327,8 @@ void MicrosoftRecordLayoutBuilder::layout(const RecordDecl *RD) {
}
void MicrosoftRecordLayoutBuilder::cxxLayout(const CXXRecordDecl *RD) {
+ // The C++ standard says that empty structs have size 1.
+ MinEmptyStructSize = CharUnits::One();
initializeLayout(RD);
initializeCXXLayout(RD);
layoutNonVirtualBases(RD);
@@ -2599,14 +2627,14 @@ void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) {
}
VtorDispAlignment = std::max(VtorDispAlignment, RequiredAlignment);
// Compute the vtordisp set.
- llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordispSet =
- computeVtorDispSet(RD);
+ llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtorDispSet;
+ computeVtorDispSet(HasVtorDispSet, RD);
// Iterate through the virtual bases and lay them out.
const ASTRecordLayout *PreviousBaseLayout = nullptr;
for (const CXXBaseSpecifier &VBase : RD->vbases()) {
const CXXRecordDecl *BaseDecl = VBase.getType()->getAsCXXRecordDecl();
const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
- bool HasVtordisp = HasVtordispSet.count(BaseDecl);
+ bool HasVtordisp = HasVtorDispSet.count(BaseDecl) > 0;
// Insert padding between two bases if the left first one is zero sized or
// contains a zero sized subobject and the right is zero sized or one leads
// with a zero sized base. The padding between virtual bases is 4
@@ -2629,7 +2657,7 @@ void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) {
void MicrosoftRecordLayoutBuilder::finalizeLayout(const RecordDecl *RD) {
// Respect required alignment. Note that in 32-bit mode Required alignment
- // may be 0 nad cause size not to be updated.
+ // may be 0 and cause size not to be updated.
DataSize = Size;
if (!RequiredAlignment.isZero()) {
Alignment = std::max(Alignment, RequiredAlignment);
@@ -2639,11 +2667,15 @@ void MicrosoftRecordLayoutBuilder::finalizeLayout(const RecordDecl *RD) {
RoundingAlignment = std::max(RoundingAlignment, RequiredAlignment);
Size = Size.RoundUpToAlignment(RoundingAlignment);
}
- // Zero-sized structures have size equal to their alignment.
if (Size.isZero()) {
EndsWithZeroSizedObject = true;
LeadsWithZeroSizedBase = true;
- Size = Alignment;
+ // Zero-sized structures have size equal to their alignment if a
+ // __declspec(align) came into play.
+ if (RequiredAlignment >= MinEmptyStructSize)
+ Size = Alignment;
+ else
+ Size = MinEmptyStructSize;
}
}
@@ -2665,10 +2697,9 @@ RequiresVtordisp(const llvm::SmallPtrSetImpl<const CXXRecordDecl *> &
return false;
}
-llvm::SmallPtrSet<const CXXRecordDecl *, 2>
-MicrosoftRecordLayoutBuilder::computeVtorDispSet(const CXXRecordDecl *RD) {
- llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordispSet;
-
+void MicrosoftRecordLayoutBuilder::computeVtorDispSet(
+ llvm::SmallPtrSetImpl<const CXXRecordDecl *> &HasVtordispSet,
+ const CXXRecordDecl *RD) const {
// /vd2 or #pragma vtordisp(2): Always use vtordisps for virtual bases with
// vftables.
if (RD->getMSVtorDispMode() == MSVtorDispAttr::ForVFTable) {
@@ -2678,7 +2709,7 @@ MicrosoftRecordLayoutBuilder::computeVtorDispSet(const CXXRecordDecl *RD) {
if (Layout.hasExtendableVFPtr())
HasVtordispSet.insert(BaseDecl);
}
- return HasVtordispSet;
+ return;
}
// If any of our bases need a vtordisp for this type, so do we. Check our
@@ -2695,7 +2726,7 @@ MicrosoftRecordLayoutBuilder::computeVtorDispSet(const CXXRecordDecl *RD) {
// * #pragma vtordisp(0) or the /vd0 flag are in use.
if ((!RD->hasUserDeclaredConstructor() && !RD->hasUserDeclaredDestructor()) ||
RD->getMSVtorDispMode() == MSVtorDispAttr::Never)
- return HasVtordispSet;
+ return;
// /vd1 or #pragma vtordisp(1): Try to guess based on whether we think it's
// possible for a partially constructed object with virtual base overrides to
// escape a non-trivial constructor.
@@ -2706,9 +2737,9 @@ MicrosoftRecordLayoutBuilder::computeVtorDispSet(const CXXRecordDecl *RD) {
// vtordisp.
llvm::SmallPtrSet<const CXXMethodDecl *, 8> Work;
llvm::SmallPtrSet<const CXXRecordDecl *, 2> BasesWithOverriddenMethods;
- // Seed the working set with our non-destructor virtual methods.
+ // Seed the working set with our non-destructor, non-pure virtual methods.
for (const CXXMethodDecl *MD : RD->methods())
- if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD))
+ if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD) && !MD->isPure())
Work.insert(MD);
while (!Work.empty()) {
const CXXMethodDecl *MD = *Work.begin();
@@ -2730,7 +2761,6 @@ MicrosoftRecordLayoutBuilder::computeVtorDispSet(const CXXRecordDecl *RD) {
RequiresVtordisp(BasesWithOverriddenMethods, BaseDecl))
HasVtordispSet.insert(BaseDecl);
}
- return HasVtordispSet;
}
/// \brief Get or compute information about the layout of the specified record
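// A standalone sketch of the two size computations introduced in the
// LayoutBitField/LayoutField changes above, assuming 8-bit chars and the
// 8-byte AddressSanitizer granularity used for the extra padding; the helper
// names are illustrative, not part of the patch.
#include <cstdint>

static uint64_t roundUpTo(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) / Align * Align;
}

static uint64_t unionBitfieldDataSizeInBits(uint64_t FieldSizeInBits) {
  // Mirrors roundUpSizeToCharAlignment: a 3-bit union bitfield now makes the
  // union's data size 8 bits rather than 3.
  return roundUpTo(FieldSizeInBits, /*CharAlignInBits=*/8);
}

static uint64_t fieldSizeWithExtraPadding(uint64_t FieldSizeInChars) {
  // Mirrors the InsertExtraPadding branch: add an 8-byte redzone and round up
  // to 8 bytes, so a 13-byte field grows to 13 + 8 + 3 = 24 bytes.
  return roundUpTo(FieldSizeInChars + 8, 8);
}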
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index a8483dc5bb99..68c7e7278432 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -104,6 +104,26 @@ Stmt *Stmt::IgnoreImplicit() {
return s;
}
+/// \brief Skip no-op (attributed, compound) container stmts and skip captured
+/// stmt at the top, if \a IgnoreCaptured is true.
+Stmt *Stmt::IgnoreContainers(bool IgnoreCaptured) {
+ Stmt *S = this;
+ if (IgnoreCaptured)
+ if (auto CapS = dyn_cast_or_null<CapturedStmt>(S))
+ S = CapS->getCapturedStmt();
+ while (true) {
+ if (auto AS = dyn_cast_or_null<AttributedStmt>(S))
+ S = AS->getSubStmt();
+ else if (auto CS = dyn_cast_or_null<CompoundStmt>(S)) {
+ if (CS->size() != 1)
+ break;
+ S = CS->body_back();
+ } else
+ break;
+ }
+ return S;
+}
+
/// \brief Strip off all label-like statements.
///
/// This will strip off label statements, case statements, attributed
@@ -254,7 +274,7 @@ SourceLocation Stmt::getLocEnd() const {
CompoundStmt::CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts,
SourceLocation LB, SourceLocation RB)
- : Stmt(CompoundStmtClass), LBracLoc(LB), RBracLoc(RB) {
+ : Stmt(CompoundStmtClass), LBraceLoc(LB), RBraceLoc(RB) {
CompoundStmtBits.NumStmts = Stmts.size();
assert(CompoundStmtBits.NumStmts == Stmts.size() &&
"NumStmts doesn't fit in bits of CompoundStmtBits.NumStmts!");
@@ -357,6 +377,11 @@ unsigned AsmStmt::getNumPlusOperands() const {
return Res;
}
+char GCCAsmStmt::AsmStringPiece::getModifier() const {
+ assert(isOperand() && "Only Operands can have modifiers.");
+ return isLetter(Str[0]) ? Str[0] : '\0';
+}
+
StringRef GCCAsmStmt::getClobber(unsigned i) const {
return getClobberStringLiteral(i)->getString();
}
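// A tiny illustrative helper, not from the patch, mirroring the new
// AsmStringPiece::getModifier: for an operand written "%w2" the stored Str is
// "w2" and the modifier is 'w'; for a plain "%2" there is no modifier.
#include <cctype>

static char asmOperandModifier(const char *Str) {
  return std::isalpha(static_cast<unsigned char>(Str[0])) ? Str[0] : '\0';
}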
@@ -517,17 +542,25 @@ unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
CurStringPiece.clear();
}
- // Handle %x4 and %x[foo] by capturing x as the modifier character.
- char Modifier = '\0';
+ // Handle operands that have asmSymbolicName (e.g., %x[foo]) and those that
+ // don't (e.g., %x4). 'x' following the '%' is the constraint modifier.
+
+ const char *Begin = CurPtr - 1; // Points to the character following '%'.
+ const char *Percent = Begin - 1; // Points to '%'.
+
if (isLetter(EscapedChar)) {
if (CurPtr == StrEnd) { // Premature end.
DiagOffs = CurPtr-StrStart-1;
return diag::err_asm_invalid_escape;
}
- Modifier = EscapedChar;
EscapedChar = *CurPtr++;
}
+ const TargetInfo &TI = C.getTargetInfo();
+ const SourceManager &SM = C.getSourceManager();
+ const LangOptions &LO = C.getLangOpts();
+
+ // Handle operands that don't have asmSymbolicName (e.g., %x4).
if (isDigit(EscapedChar)) {
// %n - Assembler operand n
unsigned N = 0;
@@ -543,11 +576,21 @@ unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
return diag::err_asm_invalid_operand_number;
}
- Pieces.push_back(AsmStringPiece(N, Modifier));
+ // Str contains "x4" (Operand without the leading %).
+ std::string Str(Begin, CurPtr - Begin);
+
+ // (BeginLoc, EndLoc) represents the range of the operand we are currently
+ // processing. Unlike Str, the range includes the leading '%'.
+ SourceLocation BeginLoc =
+ getAsmString()->getLocationOfByte(Percent - StrStart, SM, LO, TI);
+ SourceLocation EndLoc =
+ getAsmString()->getLocationOfByte(CurPtr - StrStart, SM, LO, TI);
+
+ Pieces.push_back(AsmStringPiece(N, Str, BeginLoc, EndLoc));
continue;
}
- // Handle %[foo], a symbolic operand reference.
+ // Handle operands that have asmSymbolicName (e.g., %x[foo]).
if (EscapedChar == '[') {
DiagOffs = CurPtr-StrStart-1;
@@ -566,7 +609,18 @@ unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
DiagOffs = CurPtr-StrStart;
return diag::err_asm_unknown_symbolic_operand_name;
}
- Pieces.push_back(AsmStringPiece(N, Modifier));
+
+ // Str contains "x[foo]" (Operand without the leading %).
+ std::string Str(Begin, NameEnd + 1 - Begin);
+
+ // (BeginLoc, EndLoc) represents the range of the operand we are currently
+ // processing. Unlike Str, the range includes the leading '%'.
+ SourceLocation BeginLoc =
+ getAsmString()->getLocationOfByte(Percent - StrStart, SM, LO, TI);
+ SourceLocation EndLoc =
+ getAsmString()->getLocationOfByte(NameEnd + 1 - StrStart, SM, LO, TI);
+
+ Pieces.push_back(AsmStringPiece(N, Str, BeginLoc, EndLoc));
CurPtr = NameEnd+1;
continue;
@@ -956,20 +1010,22 @@ Expr* ReturnStmt::getRetValue() {
return cast_or_null<Expr>(RetExpr);
}
-SEHTryStmt::SEHTryStmt(bool IsCXXTry, SourceLocation TryLoc, Stmt *TryBlock,
- Stmt *Handler, int HandlerIndex, int HandlerParentIndex)
- : Stmt(SEHTryStmtClass), IsCXXTry(IsCXXTry), TryLoc(TryLoc),
- HandlerIndex(HandlerIndex), HandlerParentIndex(HandlerParentIndex) {
- Children[TRY] = TryBlock;
+SEHTryStmt::SEHTryStmt(bool IsCXXTry,
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler)
+ : Stmt(SEHTryStmtClass),
+ IsCXXTry(IsCXXTry),
+ TryLoc(TryLoc)
+{
+ Children[TRY] = TryBlock;
Children[HANDLER] = Handler;
}
-SEHTryStmt *SEHTryStmt::Create(const ASTContext &C, bool IsCXXTry,
+SEHTryStmt* SEHTryStmt::Create(const ASTContext &C, bool IsCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
- Stmt *Handler, int HandlerIndex,
- int HandlerParentIndex) {
- return new (C) SEHTryStmt(IsCXXTry, TryLoc, TryBlock, Handler, HandlerIndex,
- HandlerParentIndex);
+ Stmt *Handler) {
+ return new(C) SEHTryStmt(IsCXXTry,TryLoc,TryBlock,Handler);
}
SEHExceptStmt* SEHTryStmt::getExceptHandler() const {
@@ -1120,17 +1176,24 @@ StmtRange OMPClause::children() {
llvm_unreachable("unknown OMPClause");
}
-OMPPrivateClause *OMPPrivateClause::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc,
- ArrayRef<Expr *> VL) {
+void OMPPrivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
+ assert(VL.size() == varlist_size() &&
+ "Number of private copies is not the same as the preallocated buffer");
+ std::copy(VL.begin(), VL.end(), varlist_end());
+}
+
+OMPPrivateClause *
+OMPPrivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL) {
+ // Allocate space for private variables and initializer expressions.
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * VL.size());
- OMPPrivateClause *Clause = new (Mem) OMPPrivateClause(StartLoc, LParenLoc,
- EndLoc, VL.size());
+ 2 * sizeof(Expr *) * VL.size());
+ OMPPrivateClause *Clause =
+ new (Mem) OMPPrivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
+ Clause->setPrivateCopies(PrivateVL);
return Clause;
}
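// A sketch, under stated assumptions, of the trailing-storage layout the
// allocation above relies on: one block holds the clause object, then N
// variable references, then N private copies, which is why setPrivateCopies
// writes starting at varlist_end().
//
//   [OMPPrivateClause][Expr *VarRefs[N]][Expr *PrivateCopies[N]]
#include <cstddef>

static size_t privateClauseAllocSize(size_t ClauseSize, size_t PtrAlign,
                                     size_t PtrSize, size_t N) {
  size_t Rounded = (ClauseSize + PtrAlign - 1) / PtrAlign * PtrAlign;
  return Rounded + 2 * PtrSize * N; // matches the 2 * sizeof(Expr *) * N above
}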
@@ -1138,23 +1201,35 @@ OMPPrivateClause *OMPPrivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * N);
+ 2 * sizeof(Expr *) * N);
return new (Mem) OMPPrivateClause(N);
}
-OMPFirstprivateClause *OMPFirstprivateClause::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc,
- ArrayRef<Expr *> VL) {
+void OMPFirstprivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
+ assert(VL.size() == varlist_size() &&
+ "Number of private copies is not the same as the preallocated buffer");
+ std::copy(VL.begin(), VL.end(), varlist_end());
+}
+
+void OMPFirstprivateClause::setInits(ArrayRef<Expr *> VL) {
+ assert(VL.size() == varlist_size() &&
+ "Number of inits is not the same as the preallocated buffer");
+ std::copy(VL.begin(), VL.end(), getPrivateCopies().end());
+}
+
+OMPFirstprivateClause *
+OMPFirstprivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
+ ArrayRef<Expr *> InitVL) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * VL.size());
- OMPFirstprivateClause *Clause = new (Mem) OMPFirstprivateClause(StartLoc,
- LParenLoc,
- EndLoc,
- VL.size());
+ 3 * sizeof(Expr *) * VL.size());
+ OMPFirstprivateClause *Clause =
+ new (Mem) OMPFirstprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
+ Clause->setPrivateCopies(PrivateVL);
+ Clause->setInits(InitVL);
return Clause;
}
@@ -1162,7 +1237,7 @@ OMPFirstprivateClause *OMPFirstprivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * N);
+ 3 * sizeof(Expr *) * N);
return new (Mem) OMPFirstprivateClause(N);
}
@@ -1306,6 +1381,24 @@ void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
}
+void OMPLoopDirective::setCounters(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of loop counters is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getCounters().begin());
+}
+
+void OMPLoopDirective::setUpdates(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of counter updates is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getUpdates().begin());
+}
+
+void OMPLoopDirective::setFinals(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of counter finals is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getFinals().begin());
+}
+
OMPReductionClause *OMPReductionClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL,
@@ -1348,6 +1441,21 @@ OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) {
return new (Mem) OMPFlushClause(N);
}
+const OMPClause *
+OMPExecutableDirective::getSingleClause(OpenMPClauseKind K) const {
+ auto ClauseFilter =
+ [=](const OMPClause *C) -> bool { return C->getClauseKind() == K; };
+ OMPExecutableDirective::filtered_clause_iterator<decltype(ClauseFilter)> I(
+ clauses(), ClauseFilter);
+
+ if (I) {
+ auto *Clause = *I;
+ assert(!++I && "There are at least 2 clauses of the specified kind");
+ return Clause;
+ }
+ return nullptr;
+}
+
OMPParallelDirective *OMPParallelDirective::Create(
const ASTContext &C,
SourceLocation StartLoc,
@@ -1378,15 +1486,27 @@ OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C,
OMPSimdDirective *
OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
OMPSimdDirective *Dir = new (Mem)
OMPSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond, Exprs.SeparatedCond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
return Dir;
}
@@ -1397,22 +1517,42 @@ OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
return new (Mem) OMPSimdDirective(CollapsedNum, NumClauses);
}
OMPForDirective *
OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
OMPForDirective *Dir =
new (Mem) OMPForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond, Exprs.SeparatedCond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
return Dir;
}
@@ -1423,10 +1563,57 @@ OMPForDirective *OMPForDirective::CreateEmpty(const ASTContext &C,
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
return new (Mem) OMPForDirective(CollapsedNum, NumClauses);
}
+OMPForSimdDirective *
+OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, unsigned CollapsedNum,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
+ OMPForSimdDirective *Dir = new (Mem)
+ OMPForSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond, Exprs.SeparatedCond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPForSimdDirective *OMPForSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
+ return new (Mem) OMPForSimdDirective(CollapsedNum, NumClauses);
+}
+
OMPSectionsDirective *OMPSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
@@ -1537,19 +1724,36 @@ OMPCriticalDirective *OMPCriticalDirective::CreateEmpty(const ASTContext &C,
return new (Mem) OMPCriticalDirective();
}
-OMPParallelForDirective *
-OMPParallelForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation EndLoc, unsigned CollapsedNum,
- ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt) {
+OMPParallelForDirective *OMPParallelForDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForDirective),
llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_parallel_for));
OMPParallelForDirective *Dir = new (Mem)
OMPParallelForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond, Exprs.SeparatedCond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
return Dir;
}
@@ -1558,11 +1762,57 @@ OMPParallelForDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForDirective),
llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_parallel_for));
return new (Mem) OMPParallelForDirective(CollapsedNum, NumClauses);
}
+OMPParallelForSimdDirective *OMPParallelForSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
+ OMPParallelForSimdDirective *Dir = new (Mem) OMPParallelForSimdDirective(
+ StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond, Exprs.SeparatedCond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPParallelForSimdDirective *
+OMPParallelForSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
+ return new (Mem) OMPParallelForSimdDirective(CollapsedNum, NumClauses);
+}
+
OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
@@ -1678,3 +1928,103 @@ OMPFlushDirective *OMPFlushDirective::CreateEmpty(const ASTContext &C,
return new (Mem) OMPFlushDirective(NumClauses);
}
+OMPOrderedDirective *OMPOrderedDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPOrderedDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ OMPOrderedDirective *Dir = new (Mem) OMPOrderedDirective(StartLoc, EndLoc);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPOrderedDirective *OMPOrderedDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPOrderedDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ return new (Mem) OMPOrderedDirective();
+}
+
+OMPAtomicDirective *
+OMPAtomicDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ 4 * sizeof(Stmt *));
+ OMPAtomicDirective *Dir =
+ new (Mem) OMPAtomicDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setX(X);
+ Dir->setV(V);
+ Dir->setExpr(E);
+ return Dir;
+}
+
+OMPAtomicDirective *OMPAtomicDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + 4 * sizeof(Stmt *));
+ return new (Mem) OMPAtomicDirective(NumClauses);
+}
+
+OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTargetDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPTargetDirective *Dir =
+ new (Mem) OMPTargetDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPTargetDirective *OMPTargetDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTargetDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPTargetDirective(NumClauses);
+}
+
+OMPTeamsDirective *OMPTeamsDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTeamsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPTeamsDirective *Dir =
+ new (Mem) OMPTeamsDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPTeamsDirective *OMPTeamsDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTeamsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPTeamsDirective(NumClauses);
+}
+
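// A brief usage sketch of the new OMPExecutableDirective::getSingleClause
// helper added above; the header path and the OMPC_collapse example are
// assumptions made for illustration (any clause kind that may appear at most
// once would do).
#include "clang/AST/StmtOpenMP.h"
using namespace clang;

static bool hasCollapseClause(const OMPExecutableDirective *Dir) {
  // getSingleClause asserts if more than one clause of the kind is present.
  return Dir->getSingleClause(OMPC_collapse) != nullptr;
}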
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 1fdad9f4272c..927a679244b5 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -665,6 +665,22 @@ void OMPClausePrinter::VisitOMPMergeableClause(OMPMergeableClause *) {
OS << "mergeable";
}
+void OMPClausePrinter::VisitOMPReadClause(OMPReadClause *) { OS << "read"; }
+
+void OMPClausePrinter::VisitOMPWriteClause(OMPWriteClause *) { OS << "write"; }
+
+void OMPClausePrinter::VisitOMPUpdateClause(OMPUpdateClause *) {
+ OS << "update";
+}
+
+void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) {
+ OS << "capture";
+}
+
+void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
+ OS << "seq_cst";
+}
+
template<typename T>
void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
for (typename T::varlist_iterator I = Node->varlist_begin(),
@@ -820,6 +836,11 @@ void StmtPrinter::VisitOMPForDirective(OMPForDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPForSimdDirective(OMPForSimdDirective *Node) {
+ Indent() << "#pragma omp for simd ";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPSectionsDirective(OMPSectionsDirective *Node) {
Indent() << "#pragma omp sections ";
PrintOMPExecutableDirective(Node);
@@ -855,6 +876,12 @@ void StmtPrinter::VisitOMPParallelForDirective(OMPParallelForDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPParallelForSimdDirective(
+ OMPParallelForSimdDirective *Node) {
+ Indent() << "#pragma omp parallel for simd ";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *Node) {
Indent() << "#pragma omp parallel sections ";
@@ -886,6 +913,26 @@ void StmtPrinter::VisitOMPFlushDirective(OMPFlushDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPOrderedDirective(OMPOrderedDirective *Node) {
+ Indent() << "#pragma omp ordered";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPAtomicDirective(OMPAtomicDirective *Node) {
+ Indent() << "#pragma omp atomic ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTargetDirective(OMPTargetDirective *Node) {
+ Indent() << "#pragma omp target ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTeamsDirective(OMPTeamsDirective *Node) {
+ Indent() << "#pragma omp teams ";
+ PrintOMPExecutableDirective(Node);
+}
+
//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//
@@ -957,28 +1004,7 @@ void StmtPrinter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
}
void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
- switch (Node->getIdentType()) {
- default:
- llvm_unreachable("unknown case");
- case PredefinedExpr::Func:
- OS << "__func__";
- break;
- case PredefinedExpr::Function:
- OS << "__FUNCTION__";
- break;
- case PredefinedExpr::FuncDName:
- OS << "__FUNCDNAME__";
- break;
- case PredefinedExpr::FuncSig:
- OS << "__FUNCSIG__";
- break;
- case PredefinedExpr::LFunction:
- OS << "L__FUNCTION__";
- break;
- case PredefinedExpr::PrettyFunction:
- OS << "__PRETTY_FUNCTION__";
- break;
- }
+ OS << PredefinedExpr::getIdentTypeName(Node->getIdentType());
}
void StmtPrinter::VisitCharacterLiteral(CharacterLiteral *Node) {
@@ -1716,6 +1742,8 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
case LCK_ByCopy:
OS << C->getCapturedVar()->getName();
break;
+ case LCK_VLAType:
+ llvm_unreachable("VLA type in explicit captures.");
}
if (C->isInitCapture())
@@ -1994,6 +2022,20 @@ void StmtPrinter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *Node){
PrintExpr(Node->GetTemporaryExpr());
}
+void StmtPrinter::VisitCXXFoldExpr(CXXFoldExpr *E) {
+ OS << "(";
+ if (E->getLHS()) {
+ PrintExpr(E->getLHS());
+ OS << " " << BinaryOperator::getOpcodeStr(E->getOperator()) << " ";
+ }
+ OS << "...";
+ if (E->getRHS()) {
+ OS << " " << BinaryOperator::getOpcodeStr(E->getOperator()) << " ";
+ PrintExpr(E->getRHS());
+ }
+ OS << ")";
+}
+
// Obj-C
void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
@@ -2138,6 +2180,11 @@ void StmtPrinter::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {
PrintExpr(Node->getSourceExpr());
}
+void StmtPrinter::VisitTypoExpr(TypoExpr *Node) {
+ // TODO: Print something reasonable for a TypoExpr, if necessary.
+ assert(false && "Cannot print TypoExpr nodes");
+}
+
void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) {
OS << "__builtin_astype(";
PrintExpr(Node->getSrcExpr());
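// An illustration of the strings VisitCXXFoldExpr above produces for the
// three fold forms (using '+' as the operator):
//   (args + ...)       unary right fold:  LHS = args, RHS = null
//   (... + args)       unary left fold:   LHS = null, RHS = args
//   (0 + ... + args)   binary left fold:  LHS = 0,    RHS = args
// A small stand-alone model of the same formatting logic:
#include <string>

static std::string printFold(const std::string *LHS, const std::string *RHS,
                             const std::string &Op) {
  std::string Out = "(";
  if (LHS)
    Out += *LHS + " " + Op + " ";
  Out += "...";
  if (RHS)
    Out += " " + Op + " " + *RHS;
  return Out + ")";
}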
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index f44f25c96aba..d1f25d63b3f9 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -310,18 +310,38 @@ void OMPClauseProfiler::VisitOMPUntiedClause(const OMPUntiedClause *) {}
void OMPClauseProfiler::VisitOMPMergeableClause(const OMPMergeableClause *) {}
+void OMPClauseProfiler::VisitOMPReadClause(const OMPReadClause *) {}
+
+void OMPClauseProfiler::VisitOMPWriteClause(const OMPWriteClause *) {}
+
+void OMPClauseProfiler::VisitOMPUpdateClause(const OMPUpdateClause *) {}
+
+void OMPClauseProfiler::VisitOMPCaptureClause(const OMPCaptureClause *) {}
+
+void OMPClauseProfiler::VisitOMPSeqCstClause(const OMPSeqCstClause *) {}
+
template<typename T>
void OMPClauseProfiler::VisitOMPClauseList(T *Node) {
- for (auto *I : Node->varlists())
- Profiler->VisitStmt(I);
+ for (auto *E : Node->varlists()) {
+ Profiler->VisitStmt(E);
+ }
}
void OMPClauseProfiler::VisitOMPPrivateClause(const OMPPrivateClause *C) {
VisitOMPClauseList(C);
+ for (auto *E : C->private_copies()) {
+ Profiler->VisitStmt(E);
+ }
}
-void OMPClauseProfiler::VisitOMPFirstprivateClause(
- const OMPFirstprivateClause *C) {
+void
+OMPClauseProfiler::VisitOMPFirstprivateClause(const OMPFirstprivateClause *C) {
VisitOMPClauseList(C);
+ for (auto *E : C->private_copies()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->inits()) {
+ Profiler->VisitStmt(E);
+ }
}
void
OMPClauseProfiler::VisitOMPLastprivateClause(const OMPLastprivateClause *C) {
@@ -368,16 +388,24 @@ StmtProfiler::VisitOMPExecutableDirective(const OMPExecutableDirective *S) {
P.Visit(*I);
}
+void StmtProfiler::VisitOMPLoopDirective(const OMPLoopDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
void StmtProfiler::VisitOMPParallelDirective(const OMPParallelDirective *S) {
VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPSimdDirective(const OMPSimdDirective *S) {
- VisitOMPExecutableDirective(S);
+ VisitOMPLoopDirective(S);
}
void StmtProfiler::VisitOMPForDirective(const OMPForDirective *S) {
- VisitOMPExecutableDirective(S);
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPForSimdDirective(const OMPForSimdDirective *S) {
+ VisitOMPLoopDirective(S);
}
void StmtProfiler::VisitOMPSectionsDirective(const OMPSectionsDirective *S) {
@@ -403,7 +431,12 @@ void StmtProfiler::VisitOMPCriticalDirective(const OMPCriticalDirective *S) {
void
StmtProfiler::VisitOMPParallelForDirective(const OMPParallelForDirective *S) {
- VisitOMPExecutableDirective(S);
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPParallelForSimdDirective(
+ const OMPParallelForSimdDirective *S) {
+ VisitOMPLoopDirective(S);
}
void StmtProfiler::VisitOMPParallelSectionsDirective(
@@ -431,6 +464,22 @@ void StmtProfiler::VisitOMPFlushDirective(const OMPFlushDirective *S) {
VisitOMPExecutableDirective(S);
}
+void StmtProfiler::VisitOMPOrderedDirective(const OMPOrderedDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPAtomicDirective(const OMPAtomicDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPTargetDirective(const OMPTargetDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPTeamsDirective(const OMPTeamsDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
void StmtProfiler::VisitExpr(const Expr *S) {
VisitStmt(S);
}
@@ -452,6 +501,7 @@ void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) {
void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) {
VisitExpr(S);
S->getValue().Profile(ID);
+ ID.AddInteger(S->getType()->castAs<BuiltinType>()->getKind());
}
void StmtProfiler::VisitCharacterLiteral(const CharacterLiteral *S) {
@@ -464,6 +514,7 @@ void StmtProfiler::VisitFloatingLiteral(const FloatingLiteral *S) {
VisitExpr(S);
S->getValue().Profile(ID);
ID.AddBoolean(S->isExact());
+ ID.AddInteger(S->getType()->castAs<BuiltinType>()->getKind());
}
void StmtProfiler::VisitImaginaryLiteral(const ImaginaryLiteral *S) {
@@ -1018,6 +1069,8 @@ StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
VisitDecl(C->getCapturedVar());
ID.AddBoolean(C->isPackExpansion());
break;
+ case LCK_VLAType:
+ llvm_unreachable("VLA type in explicit captures.");
}
}
// Note: If we actually needed to be able to match lambda
@@ -1187,10 +1240,19 @@ void StmtProfiler::VisitMaterializeTemporaryExpr(
VisitExpr(S);
}
+void StmtProfiler::VisitCXXFoldExpr(const CXXFoldExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getOperator());
+}
+
void StmtProfiler::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
VisitExpr(E);
}
+void StmtProfiler::VisitTypoExpr(const TypoExpr *E) {
+ VisitExpr(E);
+}
+
void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
VisitExpr(S);
}
diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp
index ac6a754fe733..f8b73cb8f89a 100644
--- a/lib/AST/TemplateBase.cpp
+++ b/lib/AST/TemplateBase.cpp
@@ -33,11 +33,26 @@ using namespace clang;
/// \param TemplArg the TemplateArgument instance to print.
///
/// \param Out the raw_ostream instance to use for printing.
+///
+/// \param Policy the printing policy for EnumConstantDecl printing.
static void printIntegral(const TemplateArgument &TemplArg,
- raw_ostream &Out) {
+ raw_ostream &Out, const PrintingPolicy& Policy) {
const ::clang::Type *T = TemplArg.getIntegralType().getTypePtr();
const llvm::APSInt &Val = TemplArg.getAsIntegral();
+ if (const EnumType *ET = T->getAs<EnumType>()) {
+ for (const EnumConstantDecl* ECD : ET->getDecl()->enumerators()) {
+    // In Sema::CheckTemplateArgument, enum template argument values are
+ // extended to the size of the integer underlying the enum type. This
+ // may create a size difference between the enum value and template
+ // argument value, requiring isSameValue here instead of operator==.
+ if (llvm::APSInt::isSameValue(ECD->getInitVal(), Val)) {
+ ECD->printQualifiedName(Out, Policy);
+ return;
+ }
+ }
+ }
+
if (T->isBooleanType()) {
Out << (Val.getBoolValue() ? "true" : "false");
} else if (T->isCharType()) {
@@ -90,7 +105,8 @@ bool TemplateArgument::isDependent() const {
llvm_unreachable("Should not have a NULL template argument");
case Type:
- return getAsType()->isDependentType();
+ return getAsType()->isDependentType() ||
+ isa<PackExpansionType>(getAsType());
case Template:
return getAsTemplate().isDependent();
@@ -111,7 +127,8 @@ bool TemplateArgument::isDependent() const {
return false;
case Expression:
- return (getAsExpr()->isTypeDependent() || getAsExpr()->isValueDependent());
+ return (getAsExpr()->isTypeDependent() || getAsExpr()->isValueDependent() ||
+ isa<PackExpansionExpr>(getAsExpr()));
case Pack:
for (const auto &P : pack_elements())
@@ -294,8 +311,7 @@ bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
return TypeOrValue.V == Other.TypeOrValue.V;
case Declaration:
- return getAsDecl() == Other.getAsDecl() &&
- isDeclForReferenceParam() && Other.isDeclForReferenceParam();
+ return getAsDecl() == Other.getAsDecl();
case Integral:
return getIntegralType() == Other.getIntegralType() &&
@@ -377,7 +393,7 @@ void TemplateArgument::print(const PrintingPolicy &Policy,
break;
case Integral: {
- printIntegral(*this, Out);
+ printIntegral(*this, Out, Policy);
break;
}
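// Effect of the printIntegral change above, sketched on a hypothetical enum:
//   enum class Color { Red, Green };
//   template <Color C> struct Widget {};
// A diagnostic mentioning Widget<Color::Green> now prints "Color::Green"
// rather than the raw value "1". isSameValue is needed because Sema widens
// the stored argument value to the enum's underlying integer width, so the
// two APSInts being compared may have different bit widths, which a plain
// operator== does not accept.
#include "llvm/ADT/APSInt.h"

static bool matchesEnumerator(const llvm::APSInt &InitVal,
                              const llvm::APSInt &ArgVal) {
  // Width- and signedness-tolerant comparison, as in the enumerator loop.
  return llvm::APSInt::isSameValue(InitVal, ArgVal);
}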
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index 167787430294..e4f364d04f86 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -70,7 +70,7 @@ bool QualType::isConstant(QualType T, ASTContext &Ctx) {
if (const ArrayType *AT = Ctx.getAsArrayType(T))
return AT->getElementType().isConstant(Ctx);
- return false;
+ return T.getAddressSpace() == LangAS::opencl_constant;
}
unsigned ConstantArrayType::getNumAddressingBits(ASTContext &Context,
@@ -378,9 +378,10 @@ bool Type::isInterfaceType() const {
return false;
}
bool Type::isStructureOrClassType() const {
- if (const RecordType *RT = getAs<RecordType>())
- return RT->getDecl()->isStruct() || RT->getDecl()->isClass() ||
- RT->getDecl()->isInterface();
+ if (const RecordType *RT = getAs<RecordType>()) {
+ RecordDecl *RD = RT->getDecl();
+ return RD->isStruct() || RD->isClass() || RD->isInterface();
+ }
return false;
}
bool Type::isVoidPointerType() const {
@@ -540,10 +541,13 @@ const CXXRecordDecl *Type::getPointeeCXXRecordDecl() const {
}
CXXRecordDecl *Type::getAsCXXRecordDecl() const {
- if (const RecordType *RT = getAs<RecordType>())
- return dyn_cast<CXXRecordDecl>(RT->getDecl());
- else if (const InjectedClassNameType *Injected
- = getAs<InjectedClassNameType>())
+ return dyn_cast_or_null<CXXRecordDecl>(getAsTagDecl());
+}
+
+TagDecl *Type::getAsTagDecl() const {
+ if (const auto *TT = getAs<TagType>())
+ return cast<TagDecl>(TT->getDecl());
+ if (const auto *Injected = getAs<InjectedClassNameType>())
return Injected->getDecl();
return nullptr;
@@ -1147,7 +1151,7 @@ bool Type::isLiteralType(const ASTContext &Ctx) const {
// C++1y [basic.types]p10:
// A type is a literal type if it is:
// -- cv void; or
- if (Ctx.getLangOpts().CPlusPlus1y && isVoidType())
+ if (Ctx.getLangOpts().CPlusPlus14 && isVoidType())
return true;
// C++11 [basic.types]p10:
@@ -1577,6 +1581,7 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_X86FastCall: return "fastcall";
case CC_X86ThisCall: return "thiscall";
case CC_X86Pascal: return "pascal";
+ case CC_X86VectorCall: return "vectorcall";
case CC_X86_64Win64: return "ms_abi";
case CC_X86_64SysV: return "sysv_abi";
case CC_AAPCS: return "aapcs";
@@ -1591,18 +1596,21 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
QualType canonical,
const ExtProtoInfo &epi)
- : FunctionType(FunctionProto, result, epi.TypeQuals, canonical,
+ : FunctionType(FunctionProto, result, canonical,
result->isDependentType(),
result->isInstantiationDependentType(),
result->isVariablyModifiedType(),
result->containsUnexpandedParameterPack(), epi.ExtInfo),
- NumParams(params.size()), NumExceptions(epi.NumExceptions),
- ExceptionSpecType(epi.ExceptionSpecType),
+ NumParams(params.size()),
+ NumExceptions(epi.ExceptionSpec.Exceptions.size()),
+ ExceptionSpecType(epi.ExceptionSpec.Type),
HasAnyConsumedParams(epi.ConsumedParameters != nullptr),
- Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn),
- RefQualifier(epi.RefQualifier) {
+ Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn) {
assert(NumParams == params.size() && "function has too many parameters");
+ FunctionTypeBits.TypeQuals = epi.TypeQuals;
+ FunctionTypeBits.RefQualifier = epi.RefQualifier;
+
// Fill in the trailing argument array.
QualType *argSlot = reinterpret_cast<QualType*>(this+1);
for (unsigned i = 0; i != NumParams; ++i) {
@@ -1620,36 +1628,38 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
if (getExceptionSpecType() == EST_Dynamic) {
// Fill in the exception array.
QualType *exnSlot = argSlot + NumParams;
- for (unsigned i = 0, e = epi.NumExceptions; i != e; ++i) {
- if (epi.Exceptions[i]->isDependentType())
- setDependent();
- else if (epi.Exceptions[i]->isInstantiationDependentType())
+ unsigned I = 0;
+ for (QualType ExceptionType : epi.ExceptionSpec.Exceptions) {
+ // Note that a dependent exception specification does *not* make
+ // a type dependent; it's not even part of the C++ type system.
+ if (ExceptionType->isInstantiationDependentType())
setInstantiationDependent();
-
- if (epi.Exceptions[i]->containsUnexpandedParameterPack())
+
+ if (ExceptionType->containsUnexpandedParameterPack())
setContainsUnexpandedParameterPack();
- exnSlot[i] = epi.Exceptions[i];
+ exnSlot[I++] = ExceptionType;
}
} else if (getExceptionSpecType() == EST_ComputedNoexcept) {
// Store the noexcept expression and context.
Expr **noexSlot = reinterpret_cast<Expr **>(argSlot + NumParams);
- *noexSlot = epi.NoexceptExpr;
-
- if (epi.NoexceptExpr) {
- if (epi.NoexceptExpr->isValueDependent()
- || epi.NoexceptExpr->isTypeDependent())
- setDependent();
- else if (epi.NoexceptExpr->isInstantiationDependent())
+ *noexSlot = epi.ExceptionSpec.NoexceptExpr;
+
+ if (epi.ExceptionSpec.NoexceptExpr) {
+ if (epi.ExceptionSpec.NoexceptExpr->isValueDependent() ||
+ epi.ExceptionSpec.NoexceptExpr->isInstantiationDependent())
setInstantiationDependent();
+
+ if (epi.ExceptionSpec.NoexceptExpr->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
}
} else if (getExceptionSpecType() == EST_Uninstantiated) {
// Store the function decl from which we will resolve our
// exception specification.
FunctionDecl **slot =
reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
- slot[0] = epi.ExceptionSpecDecl;
- slot[1] = epi.ExceptionSpecTemplate;
+ slot[0] = epi.ExceptionSpec.SourceDecl;
+ slot[1] = epi.ExceptionSpec.SourceTemplate;
// This exception specification doesn't make the type dependent, because
// it's not instantiated as part of instantiating the type.
} else if (getExceptionSpecType() == EST_Unevaluated) {
@@ -1657,7 +1667,7 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
// exception specification.
FunctionDecl **slot =
reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
- slot[0] = epi.ExceptionSpecDecl;
+ slot[0] = epi.ExceptionSpec.SourceDecl;
}
if (epi.ConsumedParameters) {
@@ -1667,6 +1677,18 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
}
}
+bool FunctionProtoType::hasDependentExceptionSpec() const {
+ if (Expr *NE = getNoexceptExpr())
+ return NE->isValueDependent();
+ for (QualType ET : exceptions())
+ // A pack expansion with a non-dependent pattern is still dependent,
+ // because we don't know whether the pattern is in the exception spec
+ // or not (that depends on whether the pack has 0 expansions).
+ if (ET->isDependentType() || ET->getAs<PackExpansionType>())
+ return true;
+ return false;
+}
+
FunctionProtoType::NoexceptResult
FunctionProtoType::getNoexceptSpec(const ASTContext &ctx) const {
ExceptionSpecificationType est = getExceptionSpecType();
@@ -1755,20 +1777,21 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
assert(!(unsigned(epi.Variadic) & ~1) &&
!(unsigned(epi.TypeQuals) & ~255) &&
!(unsigned(epi.RefQualifier) & ~3) &&
- !(unsigned(epi.ExceptionSpecType) & ~7) &&
+ !(unsigned(epi.ExceptionSpec.Type) & ~15) &&
"Values larger than expected.");
ID.AddInteger(unsigned(epi.Variadic) +
(epi.TypeQuals << 1) +
(epi.RefQualifier << 9) +
- (epi.ExceptionSpecType << 11));
- if (epi.ExceptionSpecType == EST_Dynamic) {
- for (unsigned i = 0; i != epi.NumExceptions; ++i)
- ID.AddPointer(epi.Exceptions[i].getAsOpaquePtr());
- } else if (epi.ExceptionSpecType == EST_ComputedNoexcept && epi.NoexceptExpr){
- epi.NoexceptExpr->Profile(ID, Context, false);
- } else if (epi.ExceptionSpecType == EST_Uninstantiated ||
- epi.ExceptionSpecType == EST_Unevaluated) {
- ID.AddPointer(epi.ExceptionSpecDecl->getCanonicalDecl());
+ (epi.ExceptionSpec.Type << 11));
+ if (epi.ExceptionSpec.Type == EST_Dynamic) {
+ for (QualType Ex : epi.ExceptionSpec.Exceptions)
+ ID.AddPointer(Ex.getAsOpaquePtr());
+ } else if (epi.ExceptionSpec.Type == EST_ComputedNoexcept &&
+ epi.ExceptionSpec.NoexceptExpr) {
+ epi.ExceptionSpec.NoexceptExpr->Profile(ID, Context, false);
+ } else if (epi.ExceptionSpec.Type == EST_Uninstantiated ||
+ epi.ExceptionSpec.Type == EST_Unevaluated) {
+ ID.AddPointer(epi.ExceptionSpec.SourceDecl->getCanonicalDecl());
}
if (epi.ConsumedParameters) {
for (unsigned i = 0; i != NumParams; ++i)
@@ -1909,6 +1932,7 @@ bool AttributedType::isCallingConv() const {
case attr_fastcall:
case attr_stdcall:
case attr_thiscall:
+ case attr_vectorcall:
case attr_pascal:
case attr_ms_abi:
case attr_sysv_abi:
@@ -1976,32 +2000,14 @@ anyDependentTemplateArguments(const TemplateArgumentLoc *Args, unsigned N,
return false;
}
-#ifndef NDEBUG
-static bool
-anyDependentTemplateArguments(const TemplateArgument *Args, unsigned N,
- bool &InstantiationDependent) {
- for (unsigned i = 0; i != N; ++i) {
- if (Args[i].isDependent()) {
- InstantiationDependent = true;
- return true;
- }
-
- if (Args[i].isInstantiationDependent())
- InstantiationDependent = true;
- }
- return false;
-}
-#endif
-
TemplateSpecializationType::
TemplateSpecializationType(TemplateName T,
const TemplateArgument *Args, unsigned NumArgs,
QualType Canon, QualType AliasedType)
: Type(TemplateSpecialization,
Canon.isNull()? QualType(this, 0) : Canon,
- Canon.isNull()? T.isDependent() : Canon->isDependentType(),
- Canon.isNull()? T.isDependent()
- : Canon->isInstantiationDependentType(),
+ Canon.isNull()? true : Canon->isDependentType(),
+ Canon.isNull()? true : Canon->isInstantiationDependentType(),
false,
T.containsUnexpandedParameterPack()),
Template(T), NumArgs(NumArgs), TypeAlias(!AliasedType.isNull()) {
@@ -2011,18 +2017,11 @@ TemplateSpecializationType(TemplateName T,
T.getKind() == TemplateName::SubstTemplateTemplateParm ||
T.getKind() == TemplateName::SubstTemplateTemplateParmPack) &&
"Unexpected template name for TemplateSpecializationType");
- bool InstantiationDependent;
- (void)InstantiationDependent;
- assert((!Canon.isNull() ||
- T.isDependent() ||
- ::anyDependentTemplateArguments(Args, NumArgs,
- InstantiationDependent)) &&
- "No canonical type for non-dependent class template specialization");
TemplateArgument *TemplateArgs
= reinterpret_cast<TemplateArgument *>(this + 1);
for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
- // Update dependent and variably-modified bits.
+ // Update instantiation-dependent and variably-modified bits.
// If the canonical type exists and is non-dependent, the template
// specialization type can be non-dependent even if one of the type
// arguments is. Given:
@@ -2030,17 +2029,13 @@ TemplateSpecializationType(TemplateName T,
// U<T> is always non-dependent, irrespective of the type T.
// However, U<Ts> contains an unexpanded parameter pack, even though
// its expansion (and thus its desugared type) doesn't.
- if (Canon.isNull() && Args[Arg].isDependent())
- setDependent();
- else if (Args[Arg].isInstantiationDependent())
+ if (Args[Arg].isInstantiationDependent())
setInstantiationDependent();
-
if (Args[Arg].getKind() == TemplateArgument::Type &&
Args[Arg].getAsType()->isVariablyModifiedType())
setVariablyModified();
if (Args[Arg].containsUnexpandedParameterPack())
setContainsUnexpandedParameterPack();
-
new (&TemplateArgs[Arg]) TemplateArgument(Args[Arg]);
}
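(Illustrative sketch, not from this patch, of the alias-template situation the loop comment above refers to; the `using` declaration is an assumption, since the corresponding context line falls between hunks.)

    template <typename T> using U = int;        // assumed alias from the comment
    template <typename T> U<T> g(T);            // U<T> desugars to int, so the
                                                // specialization is non-dependent
                                                // even though T is.
    template <typename... Ts> void h(U<Ts>...); // the pattern U<Ts> still carries
                                                // an unexpanded parameter pack.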
diff --git a/lib/AST/TypeLoc.cpp b/lib/AST/TypeLoc.cpp
index 208d695632a0..c069eb061739 100644
--- a/lib/AST/TypeLoc.cpp
+++ b/lib/AST/TypeLoc.cpp
@@ -312,6 +312,14 @@ TypeLoc TypeLoc::IgnoreParensImpl(TypeLoc TL) {
return TL;
}
+void TypeOfTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ TypeofLikeTypeLoc<TypeOfTypeLoc, TypeOfType, TypeOfTypeLocInfo>
+ ::initializeLocal(Context, Loc);
+ this->getLocalData()->UnderlyingTInfo = Context.getTrivialTypeSourceInfo(
+ getUnderlyingType(), Loc);
+}
+
void ElaboratedTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
setElaboratedKeywordLoc(Loc);
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index 061473eb848d..e36fc175c449 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -673,6 +673,9 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
case CC_X86ThisCall:
OS << " __attribute__((thiscall))";
break;
+ case CC_X86VectorCall:
+ OS << " __attribute__((vectorcall))";
+ break;
case CC_X86Pascal:
OS << " __attribute__((pascal))";
break;
@@ -1235,6 +1238,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case AttributedType::attr_fastcall: OS << "fastcall"; break;
case AttributedType::attr_stdcall: OS << "stdcall"; break;
case AttributedType::attr_thiscall: OS << "thiscall"; break;
+ case AttributedType::attr_vectorcall: OS << "vectorcall"; break;
case AttributedType::attr_pascal: OS << "pascal"; break;
case AttributedType::attr_ms_abi: OS << "ms_abi"; break;
case AttributedType::attr_sysv_abi: OS << "sysv_abi"; break;
@@ -1428,18 +1432,6 @@ PrintTemplateArgumentList(raw_ostream &OS,
OS << '>';
}
-void QualType::dump(const char *msg) const {
- if (msg)
- llvm::errs() << msg << ": ";
- LangOptions LO;
- print(llvm::errs(), PrintingPolicy(LO), "identifier");
- llvm::errs() << '\n';
-}
-
-LLVM_DUMP_METHOD void QualType::dump() const { dump(nullptr); }
-
-LLVM_DUMP_METHOD void Type::dump() const { QualType(this, 0).dump(); }
-
std::string Qualifiers::getAsString() const {
LangOptions LO;
return getAsString(PrintingPolicy(LO));
@@ -1498,6 +1490,9 @@ void Qualifiers::print(raw_ostream &OS, const PrintingPolicy& Policy,
case LangAS::opencl_constant:
OS << "__constant";
break;
+ case LangAS::opencl_generic:
+ OS << "__generic";
+ break;
default:
OS << "__attribute__((address_space(";
OS << addrspace;
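(A minimal sketch, not part of this patch, exercising the vectorcall printing added earlier in this file's hunks; the declaration is an assumed example.)

    // On x86, vectorcall passes additional (vector) arguments in SSE registers.
    void __attribute__((vectorcall)) dot(float a, float b);
    // With the cases added above, the type printer renders this function's type
    // roughly as:  void (float, float) __attribute__((vectorcall))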
diff --git a/lib/AST/VTTBuilder.cpp b/lib/AST/VTTBuilder.cpp
index c213d1cef6aa..53461ebbb812 100644
--- a/lib/AST/VTTBuilder.cpp
+++ b/lib/AST/VTTBuilder.cpp
@@ -105,7 +105,7 @@ VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
CharUnits BaseOffset;
if (I.isVirtual()) {
// Ignore virtual bases that we've already visited.
- if (!VBases.insert(BaseDecl))
+ if (!VBases.insert(BaseDecl).second)
continue;
BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
@@ -157,7 +157,7 @@ void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
// Check if this is a virtual base.
if (I.isVirtual()) {
// Check if we've seen this base before.
- if (!VBases.insert(BaseDecl))
+ if (!VBases.insert(BaseDecl).second)
continue;
CharUnits BaseOffset =
diff --git a/lib/AST/VTableBuilder.cpp b/lib/AST/VTableBuilder.cpp
index fa1127f58f9b..ddb1f057ac8e 100644
--- a/lib/AST/VTableBuilder.cpp
+++ b/lib/AST/VTableBuilder.cpp
@@ -64,7 +64,7 @@ public:
/// Method - The method decl of the overrider.
const CXXMethodDecl *Method;
- /// VirtualBase - The virtual base class subobject of this overridder.
+ /// VirtualBase - The virtual base class subobject of this overrider.
/// Note that this records the closest derived virtual base class subobject.
const CXXRecordDecl *VirtualBase;
@@ -389,7 +389,7 @@ void FinalOverriders::dump(raw_ostream &Out, BaseSubobject Base,
CharUnits BaseOffset;
if (B.isVirtual()) {
- if (!VisitedVirtualBases.insert(BaseDecl)) {
+ if (!VisitedVirtualBases.insert(BaseDecl).second) {
// We've visited this base before.
continue;
}
@@ -748,7 +748,7 @@ VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
// Check if this is a virtual base that we haven't visited before.
- if (B.isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
+ if (B.isVirtual() && VisitedVirtualBases.insert(BaseDecl).second) {
CharUnits Offset =
LayoutClassLayout.getVBaseClassOffset(BaseDecl) - OffsetInLayoutClass;
@@ -1105,7 +1105,7 @@ namespace {
bool visit(const CXXMethodDecl *MD) {
// Don't recurse on this method if we've already collected it.
- return Methods->insert(MD);
+ return Methods->insert(MD).second;
}
};
}
@@ -1842,7 +1842,7 @@ void ItaniumVTableBuilder::DeterminePrimaryVirtualBases(
CharUnits BaseOffsetInLayoutClass;
if (B.isVirtual()) {
- if (!VBases.insert(BaseDecl))
+ if (!VBases.insert(BaseDecl).second)
continue;
const ASTRecordLayout &LayoutClassLayout =
@@ -1870,8 +1870,9 @@ void ItaniumVTableBuilder::LayoutVTablesForVirtualBases(
// Check if this base needs a vtable. (If it's virtual, not a primary base
// of some other class, and we haven't visited it before).
- if (B.isVirtual() && BaseDecl->isDynamicClass() &&
- !PrimaryVirtualBases.count(BaseDecl) && VBases.insert(BaseDecl)) {
+ if (B.isVirtual() && BaseDecl->isDynamicClass() &&
+ !PrimaryVirtualBases.count(BaseDecl) &&
+ VBases.insert(BaseDecl).second) {
const ASTRecordLayout &MostDerivedClassLayout =
Context.getASTRecordLayout(MostDerivedClass);
CharUnits BaseOffset =
@@ -2390,6 +2391,7 @@ namespace {
// first vfptr whose table provides a compatible overridden method. In many
// cases, this permits the original vf-table entry to directly call
// the method instead of passing through a thunk.
+// See example before VFTableBuilder::ComputeThisOffset below.
//
// A compatible overridden method is one which does not have a non-trivial
// covariant-return adjustment.
@@ -2412,6 +2414,9 @@ namespace {
// a) a user-defined ctor/dtor
// and
// b) a method overriding a method in a virtual base.
+//
+// To get a better understanding of this code,
+// you might want to see examples in test/CodeGenCXX/microsoft-abi-vtables-*.cpp
class VFTableBuilder {
public:
@@ -2464,11 +2469,18 @@ private:
/// or used for vcalls in the most derived class.
bool Shadowed;
- MethodInfo(uint64_t VBTableIndex, uint64_t VFTableIndex)
+ /// UsesExtraSlot - Indicates if this vftable slot was created because
+ /// any of the overridden slots required a return adjusting thunk.
+ bool UsesExtraSlot;
+
+ MethodInfo(uint64_t VBTableIndex, uint64_t VFTableIndex,
+ bool UsesExtraSlot = false)
: VBTableIndex(VBTableIndex), VFTableIndex(VFTableIndex),
- Shadowed(false) {}
+ Shadowed(false), UsesExtraSlot(UsesExtraSlot) {}
- MethodInfo() : VBTableIndex(0), VFTableIndex(0), Shadowed(false) {}
+ MethodInfo()
+ : VBTableIndex(0), VFTableIndex(0), Shadowed(false),
+ UsesExtraSlot(false) {}
};
typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
@@ -2525,8 +2537,6 @@ private:
}
}
- bool NeedsReturnAdjustingThunk(const CXXMethodDecl *MD);
-
/// AddMethods - Add the methods of this base subobject and the relevant
/// subbases to the vftable we're currently laying out.
void AddMethods(BaseSubobject Base, unsigned BaseDepth,
@@ -2534,13 +2544,15 @@ private:
BasesSetVectorTy &VisitedBases);
void LayoutVFTable() {
- // FIXME: add support for RTTI when we have proper LLVM support for symbols
- // pointing to the middle of a section.
+ // RTTI data goes before all other entries.
+ if (HasRTTIComponent)
+ Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
BasesSetVectorTy VisitedBases;
AddMethods(BaseSubobject(MostDerivedClass, CharUnits::Zero()), 0, nullptr,
VisitedBases);
- assert(Components.size() && "vftable can't be empty");
+ assert((HasRTTIComponent ? Components.size() - 1 : Components.size()) &&
+ "vftable can't be empty");
assert(MethodVFTableLocations.empty());
for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
@@ -2561,13 +2573,6 @@ private:
}
}
- void ErrorUnsupported(StringRef Feature, SourceLocation Location) {
- clang::DiagnosticsEngine &Diags = Context.getDiagnostics();
- unsigned DiagID = Diags.getCustomDiagID(
- DiagnosticsEngine::Error, "v-table layout for %0 is not supported yet");
- Diags.Report(Context.getFullLoc(Location), DiagID) << Feature;
- }
-
public:
VFTableBuilder(MicrosoftVTableContext &VTables,
const CXXRecordDecl *MostDerivedClass, const VPtrInfo *Which)
@@ -2581,8 +2586,6 @@ public:
// definition of the vftable.
HasRTTIComponent = Context.getLangOpts().RTTIData &&
!MostDerivedClass->hasAttr<DLLImportAttr>();
- if (HasRTTIComponent)
- Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
LayoutVFTable();
@@ -2634,7 +2637,7 @@ struct InitialOverriddenDefinitionCollector {
if (OverriddenMD->size_overridden_methods() == 0)
Bases.insert(OverriddenMD->getParent());
// Don't recurse on this method if we've already collected it.
- return VisitedOverriddenMethods.insert(OverriddenMD);
+ return VisitedOverriddenMethods.insert(OverriddenMD).second;
}
};
@@ -2644,6 +2647,60 @@ static bool BaseInSet(const CXXBaseSpecifier *Specifier,
return Bases->count(Specifier->getType()->getAsCXXRecordDecl());
}
+// Let's study one class hierarchy as an example:
+// struct A {
+// virtual void f();
+// int x;
+// };
+//
+// struct B : virtual A {
+// virtual void f();
+// };
+//
+// Record layouts:
+// struct A:
+// 0 | (A vftable pointer)
+// 4 | int x
+//
+// struct B:
+// 0 | (B vbtable pointer)
+// 4 | struct A (virtual base)
+// 4 | (A vftable pointer)
+// 8 | int x
+//
+// Let's assume we have a pointer to the A part of an object of dynamic type B:
+// B b;
+// A *a = (A*)&b;
+// a->f();
+//
+// In this hierarchy, f() belongs to the vftable of A, so B::f() expects
+// "this" parameter to point at the A subobject, which is B+4.
+// In the B::f() prologue, it adjusts "this" back to B by subtracting 4,
+// performed as a *static* adjustment.
+//
+// An interesting thing happens when we alter the relative placement of A and B
+// subobjects in a class:
+// struct C : virtual B { };
+//
+// C c;
+// A *a = (A*)&c;
+// a->f();
+//
+// Respective record layout is:
+// 0 | (C vbtable pointer)
+// 4 | struct A (virtual base)
+// 4 | (A vftable pointer)
+// 8 | int x
+// 12 | struct B (virtual base)
+// 12 | (B vbtable pointer)
+//
+// The final overrider of f() in class C is still B::f(), so B+4 should be
+// passed as "this" to that code. However, "a" points at B-8, so the respective
+// vftable entry should hold a thunk that adds 12 to the "this" argument before
+// performing a tail call to B::f().
+//
+// With this example in mind, we can now calculate the 'this' argument offset
+// for the given method, relative to the beginning of the MostDerivedClass.
CharUnits
VFTableBuilder::ComputeThisOffset(FinalOverriders::OverriderInfo Overrider) {
InitialOverriddenDefinitionCollector Collector;
@@ -2723,6 +2780,104 @@ VFTableBuilder::ComputeThisOffset(FinalOverriders::OverriderInfo Overrider) {
return Ret;
}
+// Things get even more complex when the "this" adjustment has to
+// use a dynamic offset instead of a static one, or even two dynamic offsets.
+// This is sometimes required when a virtual call happens in the middle of
+// a non-most-derived class construction or destruction.
+//
+// Let's take a look at the following example:
+// struct A {
+// virtual void f();
+// };
+//
+// void foo(A *a) { a->f(); } // Knows nothing about siblings of A.
+//
+// struct B : virtual A {
+// virtual void f();
+// B() {
+// foo(this);
+// }
+// };
+//
+// struct C : virtual B {
+// virtual void f();
+// };
+//
+// Record layouts for these classes are:
+// struct A
+// 0 | (A vftable pointer)
+//
+// struct B
+// 0 | (B vbtable pointer)
+// 4 | (vtordisp for vbase A)
+// 8 | struct A (virtual base)
+// 8 | (A vftable pointer)
+//
+// struct C
+// 0 | (C vbtable pointer)
+// 4 | (vtordisp for vbase A)
+// 8 | struct A (virtual base) // A precedes B!
+// 8 | (A vftable pointer)
+// 12 | struct B (virtual base)
+// 12 | (B vbtable pointer)
+//
+// When one creates an object of type C, the C constructor:
+// - initializes all the vbptrs, then
+// - calls the A subobject constructor
+// (initializes A's vfptr with an address of A vftable), then
+// - calls the B subobject constructor
+// (initializes A's vfptr with an address of B vftable and vtordisp for A),
+// that in turn calls foo(), then
+// - initializes A's vfptr with an address of C vftable and zeroes out the
+// vtordisp
+// FIXME: if a structor knows it belongs to MDC, why doesn't it use a vftable
+// without vtordisp thunks?
+// FIXME: how are vtordisp handled in the presence of nooverride/final?
+//
+// When foo() is called, an object with a layout of class C has a vftable
+// referencing B::f() that assumes a B layout, so the "this" adjustments are
+// incorrect, unless an extra adjustment is done. This adjustment is called
+// "vtordisp adjustment". Vtordisp basically holds the difference between the
+// actual location of a vbase in the layout class and the location assumed by
+// the vftable of the class being constructed/destructed. Vtordisp is only
+// needed if "this" escapes a
+// structor (or we can't prove otherwise).
+// [i.e. vtordisp is a dynamic adjustment for a static adjustment, which is an
+// estimation of a dynamic adjustment]
+//
+// foo() gets a pointer to the A vbase and doesn't know anything about B or C,
+// so it just passes that pointer as "this" in a virtual call.
+// If there was no vtordisp, that would just dispatch to B::f().
+// However, B::f() assumes B+8 is passed as "this",
+// yet the pointer foo() passes along is B-4 (i.e. C+8).
+// An extra adjustment is needed, so we emit a thunk into the B vftable.
+// This vtordisp thunk subtracts the value of vtordisp
+// from the "this" argument (-12) before making a tailcall to B::f().
+//
+// Let's consider an even more complex example:
+// struct D : virtual B, virtual C {
+// D() {
+// foo(this);
+// }
+// };
+//
+// struct D
+// 0 | (D vbtable pointer)
+// 4 | (vtordisp for vbase A)
+// 8 | struct A (virtual base) // A precedes both B and C!
+// 8 | (A vftable pointer)
+// 12 | struct B (virtual base) // B precedes C!
+// 12 | (B vbtable pointer)
+// 16 | struct C (virtual base)
+// 16 | (C vbtable pointer)
+//
+// When D::D() calls foo(), we find ourselves in a thunk that should tailcall
+// to C::f(), which assumes C+8 as its "this" parameter. This time, foo()
+// passes along A, which is C-8. The A vtordisp holds
+// "D.vbptr[index_of_A] - offset_of_A_in_D"
+// and we statically know offset_of_A_in_D, so we can get a pointer to D.
+// When we know it, we can make an extra vbtable lookup to locate the C vbase
+// and one extra static adjustment to calculate the expected value of C+8.
void VFTableBuilder::CalculateVtordispAdjustment(
FinalOverriders::OverriderInfo Overrider, CharUnits ThisOffset,
ThisAdjustment &TA) {
@@ -2740,9 +2895,9 @@ void VFTableBuilder::CalculateVtordispAdjustment(
// OK, now we know we need to use a vtordisp thunk.
// The implicit vtordisp field is located right before the vbase.
- CharUnits VFPtrVBaseOffset = VBaseMapEntry->second.VBaseOffset;
+ CharUnits OffsetOfVBaseWithVFPtr = VBaseMapEntry->second.VBaseOffset;
TA.Virtual.Microsoft.VtordispOffset =
- (VFPtrVBaseOffset - WhichVFPtr.FullOffsetInMDC).getQuantity() - 4;
+ (OffsetOfVBaseWithVFPtr - WhichVFPtr.FullOffsetInMDC).getQuantity() - 4;
// A simple vtordisp thunk will suffice if the final overrider is defined
// in either the most derived class or its non-virtual base.
@@ -2753,7 +2908,7 @@ void VFTableBuilder::CalculateVtordispAdjustment(
// Otherwise, we need to do use the dynamic offset of the final overrider
// in order to get "this" adjustment right.
TA.Virtual.Microsoft.VBPtrOffset =
- (VFPtrVBaseOffset + WhichVFPtr.NonVirtualOffset -
+ (OffsetOfVBaseWithVFPtr + WhichVFPtr.NonVirtualOffset -
MostDerivedClassLayout.getVBPtrOffset()).getQuantity();
TA.Virtual.Microsoft.VBOffsetOffset =
Context.getTypeSizeInChars(Context.IntTy).getQuantity() *
@@ -2789,24 +2944,6 @@ static void GroupNewVirtualOverloads(
VirtualMethods.append(Groups[I].rbegin(), Groups[I].rend());
}
-/// We need a return adjusting thunk for this method if its return type is
-/// not trivially convertible to the return type of any of its overridden
-/// methods.
-bool VFTableBuilder::NeedsReturnAdjustingThunk(const CXXMethodDecl *MD) {
- OverriddenMethodsSetTy OverriddenMethods;
- ComputeAllOverriddenMethods(MD, OverriddenMethods);
- for (OverriddenMethodsSetTy::iterator I = OverriddenMethods.begin(),
- E = OverriddenMethods.end();
- I != E; ++I) {
- const CXXMethodDecl *OverriddenMD = *I;
- BaseOffset Adjustment =
- ComputeReturnAdjustmentBaseOffset(Context, MD, OverriddenMD);
- if (!Adjustment.isEmpty())
- return true;
- }
- return false;
-}
-
static bool isDirectVBase(const CXXRecordDecl *Base, const CXXRecordDecl *RD) {
for (const auto &B : RD->bases()) {
if (B.isVirtual() && B.getType()->getAsCXXRecordDecl() == Base)
@@ -2866,20 +3003,21 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
for (unsigned I = 0, E = VirtualMethods.size(); I != E; ++I) {
const CXXMethodDecl *MD = VirtualMethods[I];
- FinalOverriders::OverriderInfo Overrider =
+ FinalOverriders::OverriderInfo FinalOverrider =
Overriders.getOverrider(MD, Base.getBaseOffset());
- const CXXMethodDecl *OverriderMD = Overrider.Method;
+ const CXXMethodDecl *FinalOverriderMD = FinalOverrider.Method;
const CXXMethodDecl *OverriddenMD =
FindNearestOverriddenMethod(MD, VisitedBases);
ThisAdjustment ThisAdjustmentOffset;
- bool ReturnAdjustingThunk = false;
- CharUnits ThisOffset = ComputeThisOffset(Overrider);
+ bool ReturnAdjustingThunk = false, ForceReturnAdjustmentMangling = false;
+ CharUnits ThisOffset = ComputeThisOffset(FinalOverrider);
ThisAdjustmentOffset.NonVirtual =
(ThisOffset - WhichVFPtr.FullOffsetInMDC).getQuantity();
- if ((OverriddenMD || OverriderMD != MD) &&
+ if ((OverriddenMD || FinalOverriderMD != MD) &&
WhichVFPtr.getVBaseWithVPtr())
- CalculateVtordispAdjustment(Overrider, ThisOffset, ThisAdjustmentOffset);
+ CalculateVtordispAdjustment(FinalOverrider, ThisOffset,
+ ThisAdjustmentOffset);
if (OverriddenMD) {
// If MD overrides anything in this vftable, we need to update the entries.
@@ -2892,7 +3030,16 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
MethodInfo &OverriddenMethodInfo = OverriddenMDIterator->second;
- if (!NeedsReturnAdjustingThunk(MD)) {
+ // Let's check if the overrider requires any return adjustments.
+ // We must create a new slot if the MD's return type is not trivially
+ // convertible to the OverriddenMD's one.
+ // Once a chain of method overrides adds a return adjusting vftable slot,
+ // all subsequent overrides will also use an extra method slot.
+ ReturnAdjustingThunk = !ComputeReturnAdjustmentBaseOffset(
+ Context, MD, OverriddenMD).isEmpty() ||
+ OverriddenMethodInfo.UsesExtraSlot;
+
+ if (!ReturnAdjustingThunk) {
// No return adjustment needed - just replace the overridden method info
// with the current info.
MethodInfo MI(OverriddenMethodInfo.VBTableIndex,
@@ -2911,8 +3058,8 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
// Force a special name mangling for a return-adjusting thunk
// unless the method is the final overrider without this adjustment.
- ReturnAdjustingThunk =
- !(MD == OverriderMD && ThisAdjustmentOffset.isEmpty());
+ ForceReturnAdjustmentMangling =
+ !(MD == FinalOverriderMD && ThisAdjustmentOffset.isEmpty());
} else if (Base.getBaseOffset() != WhichVFPtr.FullOffsetInMDC ||
MD->size_overridden_methods()) {
// Skip methods that don't belong to the vftable of the current class,
@@ -2926,7 +3073,8 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
unsigned VBIndex =
LastVBase ? VTables.getVBTableIndex(MostDerivedClass, LastVBase) : 0;
MethodInfo MI(VBIndex,
- HasRTTIComponent ? Components.size() - 1 : Components.size());
+ HasRTTIComponent ? Components.size() - 1 : Components.size(),
+ ReturnAdjustingThunk);
assert(!MethodInfoMap.count(MD) &&
"Should not have method info for this method yet!");
@@ -2936,12 +3084,12 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
// We don't want to do this for pure virtual member functions.
BaseOffset ReturnAdjustmentOffset;
ReturnAdjustment ReturnAdjustment;
- if (!OverriderMD->isPure()) {
+ if (!FinalOverriderMD->isPure()) {
ReturnAdjustmentOffset =
- ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
+ ComputeReturnAdjustmentBaseOffset(Context, FinalOverriderMD, MD);
}
if (!ReturnAdjustmentOffset.isEmpty()) {
- ReturnAdjustingThunk = true;
+ ForceReturnAdjustmentMangling = true;
ReturnAdjustment.NonVirtual =
ReturnAdjustmentOffset.NonVirtualOffset.getQuantity();
if (ReturnAdjustmentOffset.VirtualBase) {
@@ -2955,8 +3103,9 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
}
}
- AddMethod(OverriderMD, ThunkInfo(ThisAdjustmentOffset, ReturnAdjustment,
- ReturnAdjustingThunk ? MD : nullptr));
+ AddMethod(FinalOverriderMD,
+ ThunkInfo(ThisAdjustmentOffset, ReturnAdjustment,
+ ForceReturnAdjustmentMangling ? MD : nullptr));
}
}
@@ -3039,10 +3188,8 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) {
if (MD->isPure())
Out << " [pure]";
- if (MD->isDeleted()) {
- ErrorUnsupported("deleted methods", MD->getLocation());
+ if (MD->isDeleted())
Out << " [deleted]";
- }
ThunkInfo Thunk = VTableThunks.lookup(I);
if (!Thunk.isEmpty())
@@ -3131,7 +3278,7 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) {
}
static bool setsIntersect(const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &A,
- const ArrayRef<const CXXRecordDecl *> &B) {
+ ArrayRef<const CXXRecordDecl *> B) {
for (ArrayRef<const CXXRecordDecl *>::iterator I = B.begin(), E = B.end();
I != E; ++I) {
if (A.count(*I))
@@ -3201,10 +3348,6 @@ void MicrosoftVTableContext::computeVTablePaths(bool ForVBTables,
if (P->MangledPath.empty() || P->MangledPath.back() != Base)
P->NextBaseToMangle = Base;
- // Keep track of the full path.
- // FIXME: Why do we need this?
- P->PathToBaseWithVPtr.insert(P->PathToBaseWithVPtr.begin(), Base);
-
// Keep track of which vtable the derived class is going to extend with
// new methods or bases. We append to either the vftable of our primary
// base, or the first non-virtual base that has a vbtable.
@@ -3292,6 +3435,58 @@ MicrosoftVTableContext::~MicrosoftVTableContext() {
llvm::DeleteContainerSeconds(VBaseInfo);
}
+static bool
+findPathForVPtr(ASTContext &Context, const ASTRecordLayout &MostDerivedLayout,
+ const CXXRecordDecl *RD, CharUnits Offset,
+ llvm::SmallPtrSetImpl<const CXXRecordDecl *> &VBasesSeen,
+ VPtrInfo::BasePath &FullPath, VPtrInfo *Info) {
+ if (RD == Info->BaseWithVPtr && Offset == Info->FullOffsetInMDC) {
+ Info->PathToBaseWithVPtr = FullPath;
+ return true;
+ }
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Recurse with non-virtual bases first.
+ // FIXME: Does this need to be in layout order? Virtual bases will be in base
+ // specifier order, which isn't necessarily layout order.
+ SmallVector<CXXBaseSpecifier, 4> Bases(RD->bases_begin(), RD->bases_end());
+ std::stable_partition(Bases.begin(), Bases.end(),
+ [](CXXBaseSpecifier bs) { return !bs.isVirtual(); });
+
+ for (const auto &B : Bases) {
+ const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl();
+ CharUnits NewOffset;
+ if (!B.isVirtual())
+ NewOffset = Offset + Layout.getBaseClassOffset(Base);
+ else {
+ if (!VBasesSeen.insert(Base).second)
+ return false;
+ NewOffset = MostDerivedLayout.getVBaseClassOffset(Base);
+ }
+ FullPath.push_back(Base);
+ if (findPathForVPtr(Context, MostDerivedLayout, Base, NewOffset, VBasesSeen,
+ FullPath, Info))
+ return true;
+ FullPath.pop_back();
+ }
+ return false;
+}
+
+static void computeFullPathsForVFTables(ASTContext &Context,
+ const CXXRecordDecl *RD,
+ VPtrInfoVector &Paths) {
+ llvm::SmallPtrSet<const CXXRecordDecl*, 4> VBasesSeen;
+ const ASTRecordLayout &MostDerivedLayout = Context.getASTRecordLayout(RD);
+ VPtrInfo::BasePath FullPath;
+ for (VPtrInfo *Info : Paths) {
+ findPathForVPtr(Context, MostDerivedLayout, RD, CharUnits::Zero(),
+ VBasesSeen, FullPath, Info);
+ VBasesSeen.clear();
+ FullPath.clear();
+ }
+}
+
void MicrosoftVTableContext::computeVTableRelatedInformation(
const CXXRecordDecl *RD) {
assert(RD->isDynamicClass());
@@ -3304,6 +3499,7 @@ void MicrosoftVTableContext::computeVTableRelatedInformation(
VPtrInfoVector *VFPtrs = new VPtrInfoVector();
computeVTablePaths(/*ForVBTables=*/false, RD, *VFPtrs);
+ computeFullPathsForVFTables(Context, RD, *VFPtrs);
VFPtrLocations[RD] = VFPtrs;
MethodVFTableLocationsTy NewMethodLocations;
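(Illustrative sketch, not from this patch, of a hierarchy that takes the return-adjusting-thunk path now tracked by UsesExtraSlot above; the class names are invented.)

    struct A          { virtual A *clone(); };
    struct Pad        { int padding; };
    struct B : Pad, A { B *clone() override; };
    // Converting B* to A* requires a non-zero offset, so the covariant override
    // needs a return-adjusting thunk and, in the Microsoft ABI, an extra vftable
    // slot for the B*-returning entry.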
diff --git a/lib/ASTMatchers/ASTMatchFinder.cpp b/lib/ASTMatchers/ASTMatchFinder.cpp
index 23708e2ff0c7..fa7968a805c4 100644
--- a/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -20,7 +20,11 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Timer.h"
#include <deque>
+#include <memory>
#include <set>
namespace clang {
@@ -53,7 +57,7 @@ static const unsigned MaxMemoizationEntries = 10000;
// FIXME: Benchmark whether memoization of non-pointer typed nodes
// provides enough benefit for the additional amount of code.
struct MatchKey {
- uint64_t MatcherID;
+ DynTypedMatcher::MatcherIDType MatcherID;
ast_type_traits::DynTypedNode Node;
BoundNodesTreeBuilder BoundNodes;
@@ -292,28 +296,33 @@ private:
class MatchASTVisitor : public RecursiveASTVisitor<MatchASTVisitor>,
public ASTMatchFinder {
public:
- MatchASTVisitor(
- std::vector<std::pair<internal::DynTypedMatcher, MatchCallback *> > *
- MatcherCallbackPairs)
- : MatcherCallbackPairs(MatcherCallbackPairs), ActiveASTContext(nullptr) {}
+ MatchASTVisitor(const MatchFinder::MatchersByType *Matchers,
+ const MatchFinder::MatchFinderOptions &Options)
+ : Matchers(Matchers), Options(Options), ActiveASTContext(nullptr) {}
+
+ ~MatchASTVisitor() {
+ if (Options.CheckProfiling) {
+ Options.CheckProfiling->Records = std::move(TimeByBucket);
+ }
+ }
void onStartOfTranslationUnit() {
- for (std::vector<std::pair<internal::DynTypedMatcher,
- MatchCallback *> >::const_iterator
- I = MatcherCallbackPairs->begin(),
- E = MatcherCallbackPairs->end();
- I != E; ++I) {
- I->second->onStartOfTranslationUnit();
+ const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ TimeBucketRegion Timer;
+ for (MatchCallback *MC : Matchers->AllCallbacks) {
+ if (EnableCheckProfiling)
+ Timer.setBucket(&TimeByBucket[MC->getID()]);
+ MC->onStartOfTranslationUnit();
}
}
void onEndOfTranslationUnit() {
- for (std::vector<std::pair<internal::DynTypedMatcher,
- MatchCallback *> >::const_iterator
- I = MatcherCallbackPairs->begin(),
- E = MatcherCallbackPairs->end();
- I != E; ++I) {
- I->second->onEndOfTranslationUnit();
+ const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ TimeBucketRegion Timer;
+ for (MatchCallback *MC : Matchers->AllCallbacks) {
+ if (EnableCheckProfiling)
+ Timer.setBucket(&TimeByBucket[MC->getID()]);
+ MC->onEndOfTranslationUnit();
}
}
@@ -372,7 +381,7 @@ public:
BoundNodesTreeBuilder *Builder, int MaxDepth,
TraversalKind Traversal, BindKind Bind) {
// For AST-nodes that don't have an identity, we can't memoize.
- if (!Node.getMemoizationData())
+ if (!Node.getMemoizationData() || !Builder->isComparable())
return matchesRecursively(Node, Matcher, Builder, MaxDepth, Traversal,
Bind);
@@ -392,9 +401,12 @@ public:
Result.Nodes = *Builder;
Result.ResultOfMatch = matchesRecursively(Node, Matcher, &Result.Nodes,
MaxDepth, Traversal, Bind);
- ResultCache[Key] = Result;
- *Builder = Result.Nodes;
- return Result.ResultOfMatch;
+
+ MemoizedMatchResult &CachedResult = ResultCache[Key];
+ CachedResult = std::move(Result);
+
+ *Builder = CachedResult.Nodes;
+ return CachedResult.ResultOfMatch;
}
// Matches children or descendants of 'Node' with 'BaseMatcher'.
@@ -447,22 +459,27 @@ public:
// Matches all registered matchers on the given node and calls the
// result callback for every node that matches.
- void match(const ast_type_traits::DynTypedNode& Node) {
- for (std::vector<std::pair<internal::DynTypedMatcher,
- MatchCallback *> >::const_iterator
- I = MatcherCallbackPairs->begin(),
- E = MatcherCallbackPairs->end();
- I != E; ++I) {
- BoundNodesTreeBuilder Builder;
- if (I->first.matches(Node, this, &Builder)) {
- MatchVisitor Visitor(ActiveASTContext, I->second);
- Builder.visitMatches(&Visitor);
- }
+ void match(const ast_type_traits::DynTypedNode &Node) {
+ // FIXME: Improve this with a switch or a visitor pattern.
+ if (auto *N = Node.get<Decl>()) {
+ match(*N);
+ } else if (auto *N = Node.get<Stmt>()) {
+ match(*N);
+ } else if (auto *N = Node.get<Type>()) {
+ match(*N);
+ } else if (auto *N = Node.get<QualType>()) {
+ match(*N);
+ } else if (auto *N = Node.get<NestedNameSpecifier>()) {
+ match(*N);
+ } else if (auto *N = Node.get<NestedNameSpecifierLoc>()) {
+ match(*N);
+ } else if (auto *N = Node.get<TypeLoc>()) {
+ match(*N);
}
}
template <typename T> void match(const T &Node) {
- match(ast_type_traits::DynTypedNode::create(Node));
+ matchDispatch(&Node);
}
// Implements ASTMatchFinder::getASTContext.
@@ -475,6 +492,116 @@ public:
bool shouldUseDataRecursionFor(clang::Stmt *S) const { return false; }
private:
+ class TimeBucketRegion {
+ public:
+ TimeBucketRegion() : Bucket(nullptr) {}
+ ~TimeBucketRegion() { setBucket(nullptr); }
+
+ /// \brief Start timing for \p NewBucket.
+ ///
+ /// If there was a bucket already set, it will finish the timing for that
+ /// other bucket.
+ /// \p NewBucket will be timed until the next call to \c setBucket() or
+ /// until the \c TimeBucketRegion is destroyed.
+ /// If \p NewBucket is the same as the currently timed bucket, this call
+ /// does nothing.
+ void setBucket(llvm::TimeRecord *NewBucket) {
+ if (Bucket != NewBucket) {
+ auto Now = llvm::TimeRecord::getCurrentTime(true);
+ if (Bucket)
+ *Bucket += Now;
+ if (NewBucket)
+ *NewBucket -= Now;
+ Bucket = NewBucket;
+ }
+ }
+
+ private:
+ llvm::TimeRecord *Bucket;
+ };
+
+ /// \brief Runs all the \p Matchers on \p Node.
+ ///
+ /// Used by \c matchDispatch() below.
+ template <typename T, typename MC>
+ void matchWithoutFilter(const T &Node, const MC &Matchers) {
+ const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ TimeBucketRegion Timer;
+ for (const auto &MP : Matchers) {
+ if (EnableCheckProfiling)
+ Timer.setBucket(&TimeByBucket[MP.second->getID()]);
+ BoundNodesTreeBuilder Builder;
+ if (MP.first.matches(Node, this, &Builder)) {
+ MatchVisitor Visitor(ActiveASTContext, MP.second);
+ Builder.visitMatches(&Visitor);
+ }
+ }
+ }
+
+ void matchWithFilter(const ast_type_traits::DynTypedNode &DynNode) {
+ auto Kind = DynNode.getNodeKind();
+ auto it = MatcherFiltersMap.find(Kind);
+ const auto &Filter =
+ it != MatcherFiltersMap.end() ? it->second : getFilterForKind(Kind);
+
+ if (Filter.empty())
+ return;
+
+ const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ TimeBucketRegion Timer;
+ auto &Matchers = this->Matchers->DeclOrStmt;
+ for (unsigned short I : Filter) {
+ auto &MP = Matchers[I];
+ if (EnableCheckProfiling)
+ Timer.setBucket(&TimeByBucket[MP.second->getID()]);
+ BoundNodesTreeBuilder Builder;
+ if (MP.first.matchesNoKindCheck(DynNode, this, &Builder)) {
+ MatchVisitor Visitor(ActiveASTContext, MP.second);
+ Builder.visitMatches(&Visitor);
+ }
+ }
+ }
+
+ const std::vector<unsigned short> &
+ getFilterForKind(ast_type_traits::ASTNodeKind Kind) {
+ auto &Filter = MatcherFiltersMap[Kind];
+ auto &Matchers = this->Matchers->DeclOrStmt;
+ assert((Matchers.size() < USHRT_MAX) && "Too many matchers.");
+ for (unsigned I = 0, E = Matchers.size(); I != E; ++I) {
+ if (Matchers[I].first.canMatchNodesOfKind(Kind)) {
+ Filter.push_back(I);
+ }
+ }
+ return Filter;
+ }
+
+ /// @{
+ /// \brief Overloads to pair the different node types to their matchers.
+ void matchDispatch(const Decl *Node) {
+ return matchWithFilter(ast_type_traits::DynTypedNode::create(*Node));
+ }
+ void matchDispatch(const Stmt *Node) {
+ return matchWithFilter(ast_type_traits::DynTypedNode::create(*Node));
+ }
+
+ void matchDispatch(const Type *Node) {
+ matchWithoutFilter(QualType(Node, 0), Matchers->Type);
+ }
+ void matchDispatch(const TypeLoc *Node) {
+ matchWithoutFilter(*Node, Matchers->TypeLoc);
+ }
+ void matchDispatch(const QualType *Node) {
+ matchWithoutFilter(*Node, Matchers->Type);
+ }
+ void matchDispatch(const NestedNameSpecifier *Node) {
+ matchWithoutFilter(*Node, Matchers->NestedNameSpecifier);
+ }
+ void matchDispatch(const NestedNameSpecifierLoc *Node) {
+ matchWithoutFilter(*Node, Matchers->NestedNameSpecifierLoc);
+ }
+ void matchDispatch(const void *) { /* Do nothing. */ }
+ /// @}
+
// Returns whether an ancestor of \p Node matches \p Matcher.
//
// The order of matching ((which can lead to different nodes being bound in
@@ -497,11 +624,7 @@ private:
assert(Node.getMemoizationData() &&
"Invariant broken: only nodes that support memoization may be "
"used in the parent map.");
- ASTContext::ParentVector Parents = ActiveASTContext->getParents(Node);
- if (Parents.empty()) {
- assert(false && "Found node that is not in the parent map.");
- return false;
- }
+
MatchKey Key;
Key.MatcherID = Matcher.getID();
Key.Node = Node;
@@ -514,9 +637,13 @@ private:
*Builder = I->second.Nodes;
return I->second.ResultOfMatch;
}
+
MemoizedMatchResult Result;
Result.ResultOfMatch = false;
Result.Nodes = *Builder;
+
+ const auto &Parents = ActiveASTContext->getParents(Node);
+ assert(!Parents.empty() && "Found node that is not in the parent map.");
if (Parents.size() == 1) {
// Only one parent - do recursive memoization.
const ast_type_traits::DynTypedNode Parent = Parents[0];
@@ -543,25 +670,24 @@ private:
break;
}
if (MatchMode != ASTMatchFinder::AMM_ParentOnly) {
- ASTContext::ParentVector Ancestors =
- ActiveASTContext->getParents(Queue.front());
- for (ASTContext::ParentVector::const_iterator I = Ancestors.begin(),
- E = Ancestors.end();
- I != E; ++I) {
+ for (const auto &Parent :
+ ActiveASTContext->getParents(Queue.front())) {
// Make sure we do not visit the same node twice.
// Otherwise, we'll visit the common ancestors as often as there
// are splits on the way down.
- if (Visited.insert(I->getMemoizationData()).second)
- Queue.push_back(*I);
+ if (Visited.insert(Parent.getMemoizationData()).second)
+ Queue.push_back(Parent);
}
}
Queue.pop_front();
}
}
- ResultCache[Key] = Result;
- *Builder = Result.Nodes;
- return Result.ResultOfMatch;
+ MemoizedMatchResult &CachedResult = ResultCache[Key];
+ CachedResult = std::move(Result);
+
+ *Builder = CachedResult.Nodes;
+ return CachedResult.ResultOfMatch;
}
// Implements a BoundNodesTree::Visitor that calls a MatchCallback with
@@ -588,22 +714,35 @@ private:
BoundNodesTreeBuilder *Builder) {
const Type *const CanonicalType =
ActiveASTContext->getCanonicalType(TypeNode);
- const std::set<const TypedefNameDecl *> &Aliases =
- TypeAliases[CanonicalType];
- for (std::set<const TypedefNameDecl*>::const_iterator
- It = Aliases.begin(), End = Aliases.end();
- It != End; ++It) {
+ for (const TypedefNameDecl *Alias : TypeAliases.lookup(CanonicalType)) {
BoundNodesTreeBuilder Result(*Builder);
- if (Matcher.matches(**It, this, &Result)) {
- *Builder = Result;
+ if (Matcher.matches(*Alias, this, &Result)) {
+ *Builder = std::move(Result);
return true;
}
}
return false;
}
- std::vector<std::pair<internal::DynTypedMatcher, MatchCallback *> > *const
- MatcherCallbackPairs;
+ /// \brief Bucket to record map.
+ ///
+ /// Used to get the appropriate bucket for each matcher.
+ llvm::StringMap<llvm::TimeRecord> TimeByBucket;
+
+ const MatchFinder::MatchersByType *Matchers;
+
+ /// \brief Filtered list of matcher indices for each matcher kind.
+ ///
+ /// \c Decl and \c Stmt toplevel matchers usually apply to a specific node
+ /// kind (and derived kinds) so it is a waste to try every matcher on every
+ /// node.
+ /// We precalculate a list of matchers that pass the toplevel restrict check.
+ /// This also allows us to skip the restrict check at matching time. See
+ /// use \c matchesNoKindCheck() above.
+ llvm::DenseMap<ast_type_traits::ASTNodeKind, std::vector<unsigned short>>
+ MatcherFiltersMap;
+
+ const MatchFinder::MatchFinderOptions &Options;
ASTContext *ActiveASTContext;
// Maps a canonical type to its TypedefDecls.
@@ -680,7 +819,7 @@ bool MatchASTVisitor::classIsDerivedFrom(const CXXRecordDecl *Declaration,
}
BoundNodesTreeBuilder Result(*Builder);
if (Base.matches(*ClassDecl, this, &Result)) {
- *Builder = Result;
+ *Builder = std::move(Result);
return true;
}
if (classIsDerivedFrom(ClassDecl, Base, Builder))
@@ -731,7 +870,8 @@ bool MatchASTVisitor::TraverseNestedNameSpecifierLoc(
match(NNS);
// We only match the nested name specifier here (as opposed to traversing it)
// because the traversal is already done in the parallel "Loc"-hierarchy.
- match(*NNS.getNestedNameSpecifier());
+ if (NNS.hasQualifier())
+ match(*NNS.getNestedNameSpecifier());
return
RecursiveASTVisitor<MatchASTVisitor>::TraverseNestedNameSpecifierLoc(NNS);
}
@@ -765,38 +905,45 @@ MatchFinder::MatchResult::MatchResult(const BoundNodes &Nodes,
MatchFinder::MatchCallback::~MatchCallback() {}
MatchFinder::ParsingDoneTestCallback::~ParsingDoneTestCallback() {}
-MatchFinder::MatchFinder() : ParsingDone(nullptr) {}
+MatchFinder::MatchFinder(MatchFinderOptions Options)
+ : Options(std::move(Options)), ParsingDone(nullptr) {}
MatchFinder::~MatchFinder() {}
void MatchFinder::addMatcher(const DeclarationMatcher &NodeMatch,
MatchCallback *Action) {
- MatcherCallbackPairs.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.DeclOrStmt.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.AllCallbacks.push_back(Action);
}
void MatchFinder::addMatcher(const TypeMatcher &NodeMatch,
MatchCallback *Action) {
- MatcherCallbackPairs.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.Type.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.AllCallbacks.push_back(Action);
}
void MatchFinder::addMatcher(const StatementMatcher &NodeMatch,
MatchCallback *Action) {
- MatcherCallbackPairs.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.DeclOrStmt.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.AllCallbacks.push_back(Action);
}
void MatchFinder::addMatcher(const NestedNameSpecifierMatcher &NodeMatch,
MatchCallback *Action) {
- MatcherCallbackPairs.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.NestedNameSpecifier.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.AllCallbacks.push_back(Action);
}
void MatchFinder::addMatcher(const NestedNameSpecifierLocMatcher &NodeMatch,
MatchCallback *Action) {
- MatcherCallbackPairs.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.NestedNameSpecifierLoc.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.AllCallbacks.push_back(Action);
}
void MatchFinder::addMatcher(const TypeLocMatcher &NodeMatch,
MatchCallback *Action) {
- MatcherCallbackPairs.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.TypeLoc.push_back(std::make_pair(NodeMatch, Action));
+ Matchers.AllCallbacks.push_back(Action);
}
bool MatchFinder::addDynamicMatcher(const internal::DynTypedMatcher &NodeMatch,
@@ -823,19 +970,19 @@ bool MatchFinder::addDynamicMatcher(const internal::DynTypedMatcher &NodeMatch,
return false;
}
-ASTConsumer *MatchFinder::newASTConsumer() {
- return new internal::MatchASTConsumer(this, ParsingDone);
+std::unique_ptr<ASTConsumer> MatchFinder::newASTConsumer() {
+ return llvm::make_unique<internal::MatchASTConsumer>(this, ParsingDone);
}
void MatchFinder::match(const clang::ast_type_traits::DynTypedNode &Node,
ASTContext &Context) {
- internal::MatchASTVisitor Visitor(&MatcherCallbackPairs);
+ internal::MatchASTVisitor Visitor(&Matchers, Options);
Visitor.set_active_ast_context(&Context);
Visitor.match(Node);
}
void MatchFinder::matchAST(ASTContext &Context) {
- internal::MatchASTVisitor Visitor(&MatcherCallbackPairs);
+ internal::MatchASTVisitor Visitor(&Matchers, Options);
Visitor.set_active_ast_context(&Context);
Visitor.onStartOfTranslationUnit();
Visitor.TraverseDecl(Context.getTranslationUnitDecl());
@@ -847,5 +994,7 @@ void MatchFinder::registerTestCallbackAfterParsing(
ParsingDone = NewParsingDone;
}
+StringRef MatchFinder::MatchCallback::getID() const { return "<unknown>"; }
+
} // end namespace ast_matchers
} // end namespace clang
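(A usage sketch, not part of this patch, showing how a MatchCallback can override the new getID() hook so that the per-matcher profiling buckets added above get readable names; the class and binding names are invented.)

    #include "clang/ASTMatchers/ASTMatchFinder.h"
    #include "clang/ASTMatchers/ASTMatchers.h"
    using namespace clang::ast_matchers;

    class DumpFunctions : public MatchFinder::MatchCallback {
    public:
      void run(const MatchFinder::MatchResult &Result) override {
        if (const auto *FD = Result.Nodes.getNodeAs<clang::FunctionDecl>("fn"))
          FD->dump();
      }
      // Used as the bucket key when MatchFinderOptions::CheckProfiling is set.
      llvm::StringRef getID() const override { return "DumpFunctions"; }
    };

    // DumpFunctions CB;
    // MatchFinder Finder;  // a MatchFinderOptions value may be passed here
    // Finder.addMatcher(functionDecl().bind("fn"), &CB);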
diff --git a/lib/ASTMatchers/ASTMatchersInternal.cpp b/lib/ASTMatchers/ASTMatchersInternal.cpp
index 47b8b6d2f27a..2c482e38dc08 100644
--- a/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -13,25 +13,219 @@
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ManagedStatic.h"
namespace clang {
namespace ast_matchers {
namespace internal {
+bool NotUnaryOperator(const ast_type_traits::DynTypedNode DynNode,
+ ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+bool AllOfVariadicOperator(const ast_type_traits::DynTypedNode DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+bool EachOfVariadicOperator(const ast_type_traits::DynTypedNode DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+bool AnyOfVariadicOperator(const ast_type_traits::DynTypedNode DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+
void BoundNodesTreeBuilder::visitMatches(Visitor *ResultVisitor) {
if (Bindings.empty())
Bindings.push_back(BoundNodesMap());
- for (unsigned i = 0, e = Bindings.size(); i != e; ++i) {
- ResultVisitor->visitMatch(BoundNodes(Bindings[i]));
+ for (BoundNodesMap &Binding : Bindings) {
+ ResultVisitor->visitMatch(BoundNodes(Binding));
}
}
-DynTypedMatcher::MatcherStorage::~MatcherStorage() {}
+namespace {
-void BoundNodesTreeBuilder::addMatch(const BoundNodesTreeBuilder &Other) {
- for (unsigned i = 0, e = Other.Bindings.size(); i != e; ++i) {
- Bindings.push_back(Other.Bindings[i]);
+typedef bool (*VariadicOperatorFunction)(
+ const ast_type_traits::DynTypedNode DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder, ArrayRef<DynTypedMatcher> InnerMatchers);
+
+template <VariadicOperatorFunction Func>
+class VariadicMatcher : public DynMatcherInterface {
+public:
+ VariadicMatcher(std::vector<DynTypedMatcher> InnerMatchers)
+ : InnerMatchers(std::move(InnerMatchers)) {}
+
+ bool dynMatches(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const override {
+ return Func(DynNode, Finder, Builder, InnerMatchers);
+ }
+
+private:
+ std::vector<DynTypedMatcher> InnerMatchers;
+};
+
+class IdDynMatcher : public DynMatcherInterface {
+ public:
+ IdDynMatcher(StringRef ID,
+ const IntrusiveRefCntPtr<DynMatcherInterface> &InnerMatcher)
+ : ID(ID), InnerMatcher(InnerMatcher) {}
+
+ bool dynMatches(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const override {
+ bool Result = InnerMatcher->dynMatches(DynNode, Finder, Builder);
+ if (Result) Builder->setBinding(ID, DynNode);
+ return Result;
+ }
+
+ private:
+ const std::string ID;
+ const IntrusiveRefCntPtr<DynMatcherInterface> InnerMatcher;
+};
+
+/// \brief A matcher that always returns true.
+///
+/// We only ever need one instance of this matcher, so we create a global one
+/// and reuse it to reduce the overhead of the matcher and increase the chance
+/// of cache hits.
+class TrueMatcherImpl : public DynMatcherInterface {
+public:
+ TrueMatcherImpl() {
+ Retain(); // Reference count will never become zero.
+ }
+ bool dynMatches(const ast_type_traits::DynTypedNode &, ASTMatchFinder *,
+ BoundNodesTreeBuilder *) const override {
+ return true;
+ }
+};
+static llvm::ManagedStatic<TrueMatcherImpl> TrueMatcherInstance;
+
+} // namespace
+
+DynTypedMatcher DynTypedMatcher::constructVariadic(
+ DynTypedMatcher::VariadicOperator Op,
+ std::vector<DynTypedMatcher> InnerMatchers) {
+ assert(InnerMatchers.size() > 0 && "Array must not be empty.");
+ assert(std::all_of(InnerMatchers.begin(), InnerMatchers.end(),
+ [&InnerMatchers](const DynTypedMatcher &M) {
+ return InnerMatchers[0].SupportedKind.isSame(M.SupportedKind);
+ }) &&
+ "SupportedKind must match!");
+
+ auto SupportedKind = InnerMatchers[0].SupportedKind;
+ // We must relax the restrict kind here.
+ // The different operators might deal differently with a mismatch.
+ // Make it the same as SupportedKind, since that is the broadest type we are
+ // allowed to accept.
+ auto RestrictKind = SupportedKind;
+
+ switch (Op) {
+ case VO_AllOf:
+ // In the case of allOf() we must pass all the checks, so making
+ // RestrictKind the most restrictive can save us time. This way we reject
+ // invalid types earlier and we can elide the kind checks inside the
+ // matcher.
+ for (auto &IM : InnerMatchers) {
+ RestrictKind = ast_type_traits::ASTNodeKind::getMostDerivedType(
+ RestrictKind, IM.RestrictKind);
+ }
+ return DynTypedMatcher(
+ SupportedKind, RestrictKind,
+ new VariadicMatcher<AllOfVariadicOperator>(std::move(InnerMatchers)));
+
+ case VO_AnyOf:
+ return DynTypedMatcher(
+ SupportedKind, RestrictKind,
+ new VariadicMatcher<AnyOfVariadicOperator>(std::move(InnerMatchers)));
+
+ case VO_EachOf:
+ return DynTypedMatcher(
+ SupportedKind, RestrictKind,
+ new VariadicMatcher<EachOfVariadicOperator>(std::move(InnerMatchers)));
+
+ case VO_UnaryNot:
+ // FIXME: Implement the Not operator to take a single matcher instead of a
+ // vector.
+ return DynTypedMatcher(
+ SupportedKind, RestrictKind,
+ new VariadicMatcher<NotUnaryOperator>(std::move(InnerMatchers)));
}
+ llvm_unreachable("Invalid Op value.");
+}
+
+DynTypedMatcher DynTypedMatcher::trueMatcher(
+ ast_type_traits::ASTNodeKind NodeKind) {
+ return DynTypedMatcher(NodeKind, NodeKind, &*TrueMatcherInstance);
+}
+
+bool DynTypedMatcher::canMatchNodesOfKind(
+ ast_type_traits::ASTNodeKind Kind) const {
+ return RestrictKind.isBaseOf(Kind);
+}
+
+DynTypedMatcher DynTypedMatcher::dynCastTo(
+ const ast_type_traits::ASTNodeKind Kind) const {
+ auto Copy = *this;
+ Copy.SupportedKind = Kind;
+ Copy.RestrictKind =
+ ast_type_traits::ASTNodeKind::getMostDerivedType(Kind, RestrictKind);
+ return Copy;
+}
+
+bool DynTypedMatcher::matches(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ if (RestrictKind.isBaseOf(DynNode.getNodeKind()) &&
+ Implementation->dynMatches(DynNode, Finder, Builder)) {
+ return true;
+ }
+ // Delete all bindings when a matcher does not match.
+  // This prevents unexpected exposure of bound nodes in unmatched
+ // branches of the match tree.
+ Builder->removeBindings([](const BoundNodesMap &) { return true; });
+ return false;
+}
+
+bool DynTypedMatcher::matchesNoKindCheck(
+ const ast_type_traits::DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ assert(RestrictKind.isBaseOf(DynNode.getNodeKind()));
+ if (Implementation->dynMatches(DynNode, Finder, Builder)) {
+ return true;
+ }
+ // Delete all bindings when a matcher does not match.
+  // This prevents unexpected exposure of bound nodes in unmatched
+ // branches of the match tree.
+ Builder->removeBindings([](const BoundNodesMap &) { return true; });
+ return false;
+}
+
+llvm::Optional<DynTypedMatcher> DynTypedMatcher::tryBind(StringRef ID) const {
+ if (!AllowBind) return llvm::None;
+ auto Result = *this;
+ Result.Implementation = new IdDynMatcher(ID, Result.Implementation);
+ return Result;
+}
+
+bool DynTypedMatcher::canConvertTo(ast_type_traits::ASTNodeKind To) const {
+ const auto From = getSupportedKind();
+ auto QualKind = ast_type_traits::ASTNodeKind::getFromNodeKind<QualType>();
+ auto TypeKind = ast_type_traits::ASTNodeKind::getFromNodeKind<Type>();
+ /// Mimic the implicit conversions of Matcher<>.
+ /// - From Matcher<Type> to Matcher<QualType>
+ if (From.isSame(TypeKind) && To.isSame(QualKind)) return true;
+ /// - From Matcher<Base> to Matcher<Derived>
+ return From.isBaseOf(To);
+}
+
+void BoundNodesTreeBuilder::addMatch(const BoundNodesTreeBuilder &Other) {
+ Bindings.append(Other.Bindings.begin(), Other.Bindings.end());
}
bool NotUnaryOperator(const ast_type_traits::DynTypedNode DynNode,
@@ -61,8 +255,8 @@ bool AllOfVariadicOperator(const ast_type_traits::DynTypedNode DynNode,
// allOf leads to one matcher for each alternative in the first
// matcher combined with each alternative in the second matcher.
// Thus, we can reuse the same Builder.
- for (size_t i = 0, e = InnerMatchers.size(); i != e; ++i) {
- if (!InnerMatchers[i].matches(DynNode, Finder, Builder))
+ for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
+ if (!InnerMatcher.matchesNoKindCheck(DynNode, Finder, Builder))
return false;
}
return true;
@@ -74,14 +268,14 @@ bool EachOfVariadicOperator(const ast_type_traits::DynTypedNode DynNode,
ArrayRef<DynTypedMatcher> InnerMatchers) {
BoundNodesTreeBuilder Result;
bool Matched = false;
- for (size_t i = 0, e = InnerMatchers.size(); i != e; ++i) {
+ for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
BoundNodesTreeBuilder BuilderInner(*Builder);
- if (InnerMatchers[i].matches(DynNode, Finder, &BuilderInner)) {
+ if (InnerMatcher.matches(DynNode, Finder, &BuilderInner)) {
Matched = true;
Result.addMatch(BuilderInner);
}
}
- *Builder = Result;
+ *Builder = std::move(Result);
return Matched;
}
@@ -89,16 +283,62 @@ bool AnyOfVariadicOperator(const ast_type_traits::DynTypedNode DynNode,
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers) {
- for (size_t i = 0, e = InnerMatchers.size(); i != e; ++i) {
+ for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
BoundNodesTreeBuilder Result = *Builder;
- if (InnerMatchers[i].matches(DynNode, Finder, &Result)) {
- *Builder = Result;
+ if (InnerMatcher.matches(DynNode, Finder, &Result)) {
+ *Builder = std::move(Result);
return true;
}
}
return false;
}
+HasNameMatcher::HasNameMatcher(StringRef NameRef)
+ : UseUnqualifiedMatch(NameRef.find("::") == NameRef.npos), Name(NameRef) {
+ assert(!Name.empty());
+}
+
+bool HasNameMatcher::matchesNodeUnqualified(const NamedDecl &Node) const {
+ assert(UseUnqualifiedMatch);
+ if (Node.getIdentifier()) {
+ // Simple name.
+ return Name == Node.getName();
+ }
+ if (Node.getDeclName()) {
+ // Name needs to be constructed.
+ llvm::SmallString<128> NodeName;
+ llvm::raw_svector_ostream OS(NodeName);
+ Node.printName(OS);
+ return Name == OS.str();
+ }
+ return false;
+}
+
+bool HasNameMatcher::matchesNodeFull(const NamedDecl &Node) const {
+ llvm::SmallString<128> NodeName = StringRef("::");
+ llvm::raw_svector_ostream OS(NodeName);
+ Node.printQualifiedName(OS);
+ const StringRef FullName = OS.str();
+ const StringRef Pattern = Name;
+
+ if (Pattern.startswith("::"))
+ return FullName == Pattern;
+
+ return FullName.endswith(Pattern) &&
+ FullName.drop_back(Pattern.size()).endswith("::");
+}
+
+bool HasNameMatcher::matchesNode(const NamedDecl &Node) const {
+ // FIXME: There is still room for improvement, but it would require copying a
+ // lot of the logic from NamedDecl::printQualifiedName(). The benchmarks do
+  // not show that the extra complexity is needed right now.
+ if (UseUnqualifiedMatch) {
+ assert(matchesNodeUnqualified(Node) == matchesNodeFull(Node));
+ return matchesNodeUnqualified(Node);
+ }
+ return matchesNodeFull(Node);
+}
+
} // end namespace internal
} // end namespace ast_matchers
} // end namespace clang
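(Illustrative sketch, not from this patch, of how the qualified-name logic in HasNameMatcher above behaves when reached through the usual hasName() matcher; the sample namespace is invented.)

    // Given:  namespace foo { namespace bar { struct Baz {}; } }
    using namespace clang::ast_matchers;
    auto M1 = recordDecl(hasName("Baz"));             // unqualified fast path
    auto M2 = recordDecl(hasName("bar::Baz"));        // suffix of "::foo::bar::Baz"
                                                      // ending at a "::" boundary
    auto M3 = recordDecl(hasName("::foo::bar::Baz")); // fully qualified, exact match
    auto M4 = recordDecl(hasName("oo::bar::Baz"));    // no match: what precedes the
                                                      // suffix ("::f") is not "::"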
diff --git a/lib/ASTMatchers/Dynamic/Marshallers.h b/lib/ASTMatchers/Dynamic/Marshallers.h
index 6e144cd26ed9..b78bc0381990 100644
--- a/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -17,8 +17,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_MATCHERS_DYNAMIC_MARSHALLERS_H
-#define LLVM_CLANG_AST_MATCHERS_DYNAMIC_MARSHALLERS_H
+#ifndef LLVM_CLANG_LIB_ASTMATCHERS_DYNAMIC_MARSHALLERS_H
+#define LLVM_CLANG_LIB_ASTMATCHERS_DYNAMIC_MARSHALLERS_H
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/ASTMatchers/Dynamic/Diagnostics.h"
@@ -30,48 +30,8 @@
namespace clang {
namespace ast_matchers {
namespace dynamic {
-
namespace internal {
-struct ArgKind {
- enum Kind {
- AK_Matcher,
- AK_Unsigned,
- AK_String
- };
- ArgKind(Kind K)
- : K(K) {}
- ArgKind(ast_type_traits::ASTNodeKind MatcherKind)
- : K(AK_Matcher), MatcherKind(MatcherKind) {}
-
- std::string asString() const {
- switch (getArgKind()) {
- case AK_Matcher:
- return (Twine("Matcher<") + MatcherKind.asStringRef() + ">").str();
- case AK_Unsigned:
- return "unsigned";
- case AK_String:
- return "string";
- }
- llvm_unreachable("unhandled ArgKind");
- }
-
- Kind getArgKind() const { return K; }
- ast_type_traits::ASTNodeKind getMatcherKind() const {
- assert(K == AK_Matcher);
- return MatcherKind;
- }
-
- bool operator<(const ArgKind &Other) const {
- if (K == AK_Matcher && Other.K == AK_Matcher)
- return MatcherKind < Other.MatcherKind;
- return K < Other.K;
- }
-
-private:
- Kind K;
- ast_type_traits::ASTNodeKind MatcherKind;
-};
/// \brief Helper template class to just from argument type to the right is/get
/// functions in VariantValue.
@@ -116,6 +76,27 @@ template <> struct ArgTypeTraits<unsigned> {
}
};
+template <> struct ArgTypeTraits<attr::Kind> {
+private:
+ static attr::Kind getAttrKind(llvm::StringRef AttrKind) {
+ return llvm::StringSwitch<attr::Kind>(AttrKind)
+#define ATTR(X) .Case("attr::" #X, attr:: X)
+#include "clang/Basic/AttrList.inc"
+ .Default(attr::Kind(-1));
+ }
+public:
+ static bool is(const VariantValue &Value) {
+ return Value.isString() &&
+ getAttrKind(Value.getString()) != attr::Kind(-1);
+ }
+ static attr::Kind get(const VariantValue &Value) {
+ return getAttrKind(Value.getString());
+ }
+ static ArgKind getKind() {
+ return ArgKind(ArgKind::AK_String);
+ }
+};
+
/// \brief Matcher descriptor interface.
///
/// Provides a \c create() method that constructs the matcher from the provided
@@ -161,16 +142,10 @@ inline bool isRetKindConvertibleTo(
ArrayRef<ast_type_traits::ASTNodeKind> RetKinds,
ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
ast_type_traits::ASTNodeKind *LeastDerivedKind) {
- for (ArrayRef<ast_type_traits::ASTNodeKind>::const_iterator
- i = RetKinds.begin(),
- e = RetKinds.end();
- i != e; ++i) {
- unsigned Distance;
- if (i->isBaseOf(Kind, &Distance)) {
- if (Specificity)
- *Specificity = 100 - Distance;
+ for (const ast_type_traits::ASTNodeKind &NodeKind : RetKinds) {
+ if (ArgKind(NodeKind).isConvertibleTo(Kind, Specificity)) {
if (LeastDerivedKind)
- *LeastDerivedKind = *i;
+ *LeastDerivedKind = NodeKind;
return true;
}
}
@@ -322,8 +297,8 @@ variadicMatcherDescriptor(StringRef MatcherName, const SourceRange &NameRange,
VariantMatcher Out;
if (!HasError) {
- Out = outvalueToVariantMatcher(
- Func(ArrayRef<const ArgT *>(InnerArgs, Args.size())));
+ Out = outvalueToVariantMatcher(Func(llvm::makeArrayRef(InnerArgs,
+ Args.size())));
}
for (size_t i = 0, e = Args.size(); i != e; ++i) {
@@ -498,7 +473,7 @@ private:
template <typename FromTypeList>
inline void collect(FromTypeList);
- const StringRef Name;
+ StringRef Name;
std::vector<MatcherDescriptor *> &Out;
};
@@ -581,15 +556,15 @@ private:
/// \brief Variadic operator marshaller function.
class VariadicOperatorMatcherDescriptor : public MatcherDescriptor {
public:
- typedef ast_matchers::internal::VariadicOperatorFunction VarFunc;
+ typedef DynTypedMatcher::VariadicOperator VarOp;
VariadicOperatorMatcherDescriptor(unsigned MinCount, unsigned MaxCount,
- VarFunc Func, StringRef MatcherName)
- : MinCount(MinCount), MaxCount(MaxCount), Func(Func),
+ VarOp Op, StringRef MatcherName)
+ : MinCount(MinCount), MaxCount(MaxCount), Op(Op),
MatcherName(MatcherName) {}
virtual VariantMatcher create(const SourceRange &NameRange,
ArrayRef<ParserValue> Args,
- Diagnostics *Error) const {
+ Diagnostics *Error) const override {
if (Args.size() < MinCount || MaxCount < Args.size()) {
const std::string MaxStr =
(MaxCount == UINT_MAX ? "" : Twine(MaxCount)).str();
@@ -609,17 +584,17 @@ public:
}
InnerArgs.push_back(Value.getMatcher());
}
- return VariantMatcher::VariadicOperatorMatcher(Func, std::move(InnerArgs));
+ return VariantMatcher::VariadicOperatorMatcher(Op, std::move(InnerArgs));
}
- bool isVariadic() const { return true; }
- unsigned getNumArgs() const { return 0; }
+ bool isVariadic() const override { return true; }
+ unsigned getNumArgs() const override { return 0; }
void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
- std::vector<ArgKind> &Kinds) const {
+ std::vector<ArgKind> &Kinds) const override {
Kinds.push_back(ThisKind);
}
bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) const {
+ ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
if (Specificity)
*Specificity = 1;
if (LeastDerivedKind)
@@ -631,7 +606,7 @@ public:
private:
const unsigned MinCount;
const unsigned MaxCount;
- const VarFunc Func;
+ const VarOp Op;
const StringRef MatcherName;
};
@@ -724,7 +699,7 @@ MatcherDescriptor *
makeMatcherAutoMarshall(ast_matchers::internal::VariadicOperatorMatcherFunc<
MinCount, MaxCount> Func,
StringRef MatcherName) {
- return new VariadicOperatorMatcherDescriptor(MinCount, MaxCount, Func.Func,
+ return new VariadicOperatorMatcherDescriptor(MinCount, MaxCount, Func.Op,
MatcherName);
}
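
The ArgTypeTraits<attr::Kind> specialization added above is what lets dynamic matcher expressions name attribute kinds as strings such as "attr::NoReturn": getAttrKind() expands clang/Basic/AttrList.inc into a StringSwitch and falls back to attr::Kind(-1) for unknown names. A minimal standalone sketch of the same pattern; ToyAttrKind and getToyAttrKind are invented stand-ins for attr::Kind and the generated .Case chain:

    // Illustrative sketch only; the real code expands AttrList.inc instead of
    // spelling out cases by hand.
    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    enum class ToyAttrKind { NoReturn, Deprecated, Invalid };

    static ToyAttrKind getToyAttrKind(llvm::StringRef Name) {
      return llvm::StringSwitch<ToyAttrKind>(Name)
          .Case("attr::NoReturn", ToyAttrKind::NoReturn)
          .Case("attr::Deprecated", ToyAttrKind::Deprecated)
          .Default(ToyAttrKind::Invalid);   // parallels attr::Kind(-1) above
    }

With the traits specialization in place, a registered matcher taking an attr::Kind argument (hasAttr, registered in Registry.cpp below) accepts exactly the strings that getAttrKind() recognizes.
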
diff --git a/lib/ASTMatchers/Dynamic/Parser.cpp b/lib/ASTMatchers/Dynamic/Parser.cpp
index 25629d99a7cc..9930c530c081 100644
--- a/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -17,6 +17,7 @@
#include "clang/Basic/CharInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ManagedStatic.h"
#include <string>
#include <vector>
@@ -258,8 +259,14 @@ private:
Parser::Sema::~Sema() {}
-VariantValue Parser::Sema::getNamedValue(StringRef Name) {
- return VariantValue();
+std::vector<ArgKind> Parser::Sema::getAcceptedCompletionTypes(
+ llvm::ArrayRef<std::pair<MatcherCtor, unsigned>> Context) {
+ return std::vector<ArgKind>();
+}
+
+std::vector<MatcherCompletion>
+Parser::Sema::getMatcherCompletions(llvm::ArrayRef<ArgKind> AcceptedTypes) {
+ return std::vector<MatcherCompletion>();
}
struct Parser::ScopedContextEntry {
@@ -288,7 +295,9 @@ bool Parser::parseIdentifierPrefixImpl(VariantValue *Value) {
if (Tokenizer->nextTokenKind() != TokenInfo::TK_OpenParen) {
// Parse as a named value.
- if (const VariantValue NamedValue = S->getNamedValue(NameToken.Text)) {
+ if (const VariantValue NamedValue =
+ NamedValues ? NamedValues->lookup(NameToken.Text)
+ : VariantValue()) {
*Value = NamedValue;
return true;
}
@@ -379,7 +388,7 @@ bool Parser::parseMatcherExpressionImpl(const TokenInfo &NameToken,
Tokenizer->consumeNextToken(); // consume the period.
const TokenInfo BindToken = Tokenizer->consumeNextToken();
if (BindToken.Kind == TokenInfo::TK_CodeCompletion) {
- addCompletion(BindToken, "bind(\"", "bind");
+ addCompletion(BindToken, MatcherCompletion("bind(\"", "bind", 1));
return false;
}
@@ -427,13 +436,28 @@ bool Parser::parseMatcherExpressionImpl(const TokenInfo &NameToken,
// If the prefix of this completion matches the completion token, add it to
// Completions minus the prefix.
-void Parser::addCompletion(const TokenInfo &CompToken, StringRef TypedText,
- StringRef Decl) {
- if (TypedText.size() >= CompToken.Text.size() &&
- TypedText.substr(0, CompToken.Text.size()) == CompToken.Text) {
- Completions.push_back(
- MatcherCompletion(TypedText.substr(CompToken.Text.size()), Decl));
+void Parser::addCompletion(const TokenInfo &CompToken,
+ const MatcherCompletion& Completion) {
+ if (StringRef(Completion.TypedText).startswith(CompToken.Text) &&
+ Completion.Specificity > 0) {
+ Completions.emplace_back(Completion.TypedText.substr(CompToken.Text.size()),
+ Completion.MatcherDecl, Completion.Specificity);
+ }
+}
+
+std::vector<MatcherCompletion> Parser::getNamedValueCompletions(
+ ArrayRef<ArgKind> AcceptedTypes) {
+ if (!NamedValues) return std::vector<MatcherCompletion>();
+ std::vector<MatcherCompletion> Result;
+ for (const auto &Entry : *NamedValues) {
+ unsigned Specificity;
+ if (Entry.getValue().isConvertibleTo(AcceptedTypes, &Specificity)) {
+ std::string Decl =
+ (Entry.getValue().getTypeAsString() + " " + Entry.getKey()).str();
+ Result.emplace_back(Entry.getKey(), Decl, Specificity);
+ }
}
+ return Result;
}
void Parser::addExpressionCompletions() {
@@ -449,12 +473,13 @@ void Parser::addExpressionCompletions() {
return;
}
- std::vector<MatcherCompletion> RegCompletions =
- Registry::getCompletions(ContextStack);
- for (std::vector<MatcherCompletion>::iterator I = RegCompletions.begin(),
- E = RegCompletions.end();
- I != E; ++I) {
- addCompletion(CompToken, I->TypedText, I->MatcherDecl);
+ auto AcceptedTypes = S->getAcceptedCompletionTypes(ContextStack);
+ for (const auto &Completion : S->getMatcherCompletions(AcceptedTypes)) {
+ addCompletion(CompToken, Completion);
+ }
+
+ for (const auto &Completion : getNamedValueCompletions(AcceptedTypes)) {
+ addCompletion(CompToken, Completion);
}
}
@@ -494,9 +519,12 @@ bool Parser::parseExpressionImpl(VariantValue *Value) {
llvm_unreachable("Unknown token kind.");
}
+static llvm::ManagedStatic<Parser::RegistrySema> DefaultRegistrySema;
+
Parser::Parser(CodeTokenizer *Tokenizer, Sema *S,
- Diagnostics *Error)
- : Tokenizer(Tokenizer), S(S), Error(Error) {}
+ const NamedValueMap *NamedValues, Diagnostics *Error)
+ : Tokenizer(Tokenizer), S(S ? S : &*DefaultRegistrySema),
+ NamedValues(NamedValues), Error(Error) {}
Parser::RegistrySema::~RegistrySema() {}
@@ -516,16 +544,22 @@ VariantMatcher Parser::RegistrySema::actOnMatcherExpression(
}
}
-bool Parser::parseExpression(StringRef Code, VariantValue *Value,
- Diagnostics *Error) {
- RegistrySema S;
- return parseExpression(Code, &S, Value, Error);
+std::vector<ArgKind> Parser::RegistrySema::getAcceptedCompletionTypes(
+ ArrayRef<std::pair<MatcherCtor, unsigned>> Context) {
+ return Registry::getAcceptedCompletionTypes(Context);
+}
+
+std::vector<MatcherCompletion> Parser::RegistrySema::getMatcherCompletions(
+ ArrayRef<ArgKind> AcceptedTypes) {
+ return Registry::getMatcherCompletions(AcceptedTypes);
}
bool Parser::parseExpression(StringRef Code, Sema *S,
+ const NamedValueMap *NamedValues,
VariantValue *Value, Diagnostics *Error) {
CodeTokenizer Tokenizer(Code, Error);
- if (!Parser(&Tokenizer, S, Error).parseExpressionImpl(Value)) return false;
+ if (!Parser(&Tokenizer, S, NamedValues, Error).parseExpressionImpl(Value))
+ return false;
if (Tokenizer.peekNextToken().Kind != TokenInfo::TK_Eof) {
Error->addError(Tokenizer.peekNextToken().Range,
Error->ET_ParserTrailingCode);
@@ -535,28 +569,31 @@ bool Parser::parseExpression(StringRef Code, Sema *S,
}
std::vector<MatcherCompletion>
-Parser::completeExpression(StringRef Code, unsigned CompletionOffset) {
+Parser::completeExpression(StringRef Code, unsigned CompletionOffset, Sema *S,
+ const NamedValueMap *NamedValues) {
Diagnostics Error;
CodeTokenizer Tokenizer(Code, &Error, CompletionOffset);
- RegistrySema S;
- Parser P(&Tokenizer, &S, &Error);
+ Parser P(&Tokenizer, S, NamedValues, &Error);
VariantValue Dummy;
P.parseExpressionImpl(&Dummy);
- return P.Completions;
-}
+ // Sort by specificity, then by name.
+ std::sort(P.Completions.begin(), P.Completions.end(),
+ [](const MatcherCompletion &A, const MatcherCompletion &B) {
+ if (A.Specificity != B.Specificity)
+ return A.Specificity > B.Specificity;
+ return A.TypedText < B.TypedText;
+ });
-llvm::Optional<DynTypedMatcher>
-Parser::parseMatcherExpression(StringRef Code, Diagnostics *Error) {
- RegistrySema S;
- return parseMatcherExpression(Code, &S, Error);
+ return P.Completions;
}
llvm::Optional<DynTypedMatcher>
-Parser::parseMatcherExpression(StringRef Code, Parser::Sema *S,
+Parser::parseMatcherExpression(StringRef Code, Sema *S,
+ const NamedValueMap *NamedValues,
Diagnostics *Error) {
VariantValue Value;
- if (!parseExpression(Code, S, &Value, Error))
+ if (!parseExpression(Code, S, NamedValues, &Value, Error))
return llvm::Optional<DynTypedMatcher>();
if (!Value.isMatcher()) {
Error->addError(SourceRange(), Error->ET_ParserNotAMatcher);
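
Parser::completeExpression and parseMatcherExpression now thread an optional Sema and NamedValueMap through, and completions are sorted by descending Specificity and then by TypedText before being returned. A minimal sketch against the signatures shown in this hunk; complete() is an invented helper, and passing null for both optional parameters falls back to the ManagedStatic RegistrySema and to "no named values", as handled in the constructor above:

    #include "clang/ASTMatchers/Dynamic/Parser.h"
    #include <vector>

    using clang::ast_matchers::dynamic::MatcherCompletion;
    using clang::ast_matchers::dynamic::Parser;

    // Offset 13 points just past the opening parenthesis of "functionDecl(".
    std::vector<MatcherCompletion> complete() {
      return Parser::completeExpression("functionDecl(", 13, nullptr, nullptr);
    }
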
diff --git a/lib/ASTMatchers/Dynamic/Registry.cpp b/lib/ASTMatchers/Dynamic/Registry.cpp
index 4bc50a0f2a96..d550a89cad49 100644
--- a/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -101,8 +101,8 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(argumentCountIs);
REGISTER_MATCHER(arraySubscriptExpr);
REGISTER_MATCHER(arrayType);
- REGISTER_MATCHER(asString);
REGISTER_MATCHER(asmStmt);
+ REGISTER_MATCHER(asString);
REGISTER_MATCHER(atomicType);
REGISTER_MATCHER(autoType);
REGISTER_MATCHER(binaryOperator);
@@ -111,7 +111,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(boolLiteral);
REGISTER_MATCHER(breakStmt);
REGISTER_MATCHER(builtinType);
- REGISTER_MATCHER(cStyleCastExpr);
REGISTER_MATCHER(callExpr);
REGISTER_MATCHER(caseStmt);
REGISTER_MATCHER(castExpr);
@@ -123,18 +122,20 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(compoundLiteralExpr);
REGISTER_MATCHER(compoundStmt);
REGISTER_MATCHER(conditionalOperator);
- REGISTER_MATCHER(constCastExpr);
REGISTER_MATCHER(constantArrayType);
+ REGISTER_MATCHER(constCastExpr);
REGISTER_MATCHER(constructExpr);
REGISTER_MATCHER(constructorDecl);
REGISTER_MATCHER(containsDeclaration);
REGISTER_MATCHER(continueStmt);
+ REGISTER_MATCHER(cStyleCastExpr);
REGISTER_MATCHER(ctorInitializer);
+ REGISTER_MATCHER(CUDAKernelCallExpr);
REGISTER_MATCHER(decl);
+ REGISTER_MATCHER(declaratorDecl);
REGISTER_MATCHER(declCountIs);
REGISTER_MATCHER(declRefExpr);
REGISTER_MATCHER(declStmt);
- REGISTER_MATCHER(declaratorDecl);
REGISTER_MATCHER(defaultArgExpr);
REGISTER_MATCHER(defaultStmt);
REGISTER_MATCHER(deleteExpr);
@@ -147,6 +148,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(enumConstantDecl);
REGISTER_MATCHER(enumDecl);
REGISTER_MATCHER(equalsBoundNode);
+ REGISTER_MATCHER(equalsIntegralValue);
REGISTER_MATCHER(explicitCastExpr);
REGISTER_MATCHER(expr);
REGISTER_MATCHER(exprWithCleanups);
@@ -160,10 +162,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(forRangeStmt);
REGISTER_MATCHER(forStmt);
REGISTER_MATCHER(friendDecl);
+ REGISTER_MATCHER(functionalCastExpr);
REGISTER_MATCHER(functionDecl);
REGISTER_MATCHER(functionTemplateDecl);
REGISTER_MATCHER(functionType);
- REGISTER_MATCHER(functionalCastExpr);
REGISTER_MATCHER(gotoStmt);
REGISTER_MATCHER(has);
REGISTER_MATCHER(hasAncestor);
@@ -175,19 +177,21 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasAnyUsingShadowDecl);
REGISTER_MATCHER(hasArgument);
REGISTER_MATCHER(hasArgumentOfType);
+ REGISTER_MATCHER(hasAttr);
REGISTER_MATCHER(hasBase);
REGISTER_MATCHER(hasBody);
REGISTER_MATCHER(hasCanonicalType);
REGISTER_MATCHER(hasCaseConstant);
REGISTER_MATCHER(hasCondition);
REGISTER_MATCHER(hasConditionVariableStatement);
- REGISTER_MATCHER(hasDeclContext);
REGISTER_MATCHER(hasDeclaration);
+ REGISTER_MATCHER(hasDeclContext);
REGISTER_MATCHER(hasDeducedType);
REGISTER_MATCHER(hasDescendant);
REGISTER_MATCHER(hasDestinationType);
REGISTER_MATCHER(hasEitherOperand);
REGISTER_MATCHER(hasElementType);
+ REGISTER_MATCHER(hasElse);
REGISTER_MATCHER(hasFalseExpression);
REGISTER_MATCHER(hasGlobalStorage);
REGISTER_MATCHER(hasImplicitDestinationType);
@@ -198,6 +202,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasLocalQualifiers);
REGISTER_MATCHER(hasLocalStorage);
REGISTER_MATCHER(hasLoopInit);
+ REGISTER_MATCHER(hasLoopVariable);
REGISTER_MATCHER(hasMethod);
REGISTER_MATCHER(hasName);
REGISTER_MATCHER(hasObjectExpression);
@@ -206,6 +211,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasParameter);
REGISTER_MATCHER(hasParent);
REGISTER_MATCHER(hasQualifier);
+ REGISTER_MATCHER(hasRangeInit);
REGISTER_MATCHER(hasRHS);
REGISTER_MATCHER(hasSingleDecl);
REGISTER_MATCHER(hasSize);
@@ -213,6 +219,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasSourceExpression);
REGISTER_MATCHER(hasTargetDecl);
REGISTER_MATCHER(hasTemplateArgument);
+ REGISTER_MATCHER(hasThen);
REGISTER_MATCHER(hasTrueExpression);
REGISTER_MATCHER(hasTypeLoc);
REGISTER_MATCHER(hasUnaryOperand);
@@ -230,22 +237,30 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isConst);
REGISTER_MATCHER(isConstQualified);
REGISTER_MATCHER(isDefinition);
+ REGISTER_MATCHER(isDeleted);
REGISTER_MATCHER(isExplicitTemplateSpecialization);
REGISTER_MATCHER(isExpr);
REGISTER_MATCHER(isExternC);
REGISTER_MATCHER(isImplicit);
+ REGISTER_MATCHER(isExpansionInFileMatching);
+ REGISTER_MATCHER(isExpansionInMainFile);
+ REGISTER_MATCHER(isInstantiated);
+ REGISTER_MATCHER(isExpansionInSystemHeader);
REGISTER_MATCHER(isInteger);
+ REGISTER_MATCHER(isIntegral);
+ REGISTER_MATCHER(isInTemplateInstantiation);
REGISTER_MATCHER(isListInitialization);
REGISTER_MATCHER(isOverride);
REGISTER_MATCHER(isPrivate);
REGISTER_MATCHER(isProtected);
REGISTER_MATCHER(isPublic);
+ REGISTER_MATCHER(isPure);
REGISTER_MATCHER(isTemplateInstantiation);
REGISTER_MATCHER(isVirtual);
REGISTER_MATCHER(isWritten);
- REGISTER_MATCHER(lValueReferenceType);
REGISTER_MATCHER(labelStmt);
REGISTER_MATCHER(lambdaExpr);
+ REGISTER_MATCHER(lValueReferenceType);
REGISTER_MATCHER(matchesName);
REGISTER_MATCHER(materializeTemporaryExpr);
REGISTER_MATCHER(member);
@@ -254,8 +269,8 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(memberPointerType);
REGISTER_MATCHER(methodDecl);
REGISTER_MATCHER(namedDecl);
- REGISTER_MATCHER(namesType);
REGISTER_MATCHER(namespaceDecl);
+ REGISTER_MATCHER(namesType);
REGISTER_MATCHER(nestedNameSpecifier);
REGISTER_MATCHER(nestedNameSpecifierLoc);
REGISTER_MATCHER(newExpr);
@@ -271,15 +286,16 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(pointee);
REGISTER_MATCHER(pointerType);
REGISTER_MATCHER(qualType);
- REGISTER_MATCHER(rValueReferenceType);
REGISTER_MATCHER(recordDecl);
REGISTER_MATCHER(recordType);
REGISTER_MATCHER(referenceType);
REGISTER_MATCHER(refersToDeclaration);
+ REGISTER_MATCHER(refersToIntegralType);
REGISTER_MATCHER(refersToType);
REGISTER_MATCHER(reinterpretCastExpr);
- REGISTER_MATCHER(returnStmt);
REGISTER_MATCHER(returns);
+ REGISTER_MATCHER(returnStmt);
+ REGISTER_MATCHER(rValueReferenceType);
REGISTER_MATCHER(sizeOfExpr);
REGISTER_MATCHER(specifiesNamespace);
REGISTER_MATCHER(specifiesType);
@@ -288,8 +304,11 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(staticCastExpr);
REGISTER_MATCHER(stmt);
REGISTER_MATCHER(stringLiteral);
+ REGISTER_MATCHER(substNonTypeTemplateParmExpr);
REGISTER_MATCHER(switchCase);
REGISTER_MATCHER(switchStmt);
+ REGISTER_MATCHER(templateArgument);
+ REGISTER_MATCHER(templateArgumentCountIs);
REGISTER_MATCHER(templateSpecializationType);
REGISTER_MATCHER(temporaryObjectExpr);
REGISTER_MATCHER(thisExpr);
@@ -298,8 +317,9 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(to);
REGISTER_MATCHER(tryStmt);
REGISTER_MATCHER(type);
- REGISTER_MATCHER(typeLoc);
+ REGISTER_MATCHER(typedefDecl);
REGISTER_MATCHER(typedefType);
+ REGISTER_MATCHER(typeLoc);
REGISTER_MATCHER(unaryExprOrTypeTraitExpr);
REGISTER_MATCHER(unaryOperator);
REGISTER_MATCHER(unaryTransformType);
@@ -308,8 +328,11 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(unresolvedUsingValueDecl);
REGISTER_MATCHER(userDefinedLiteral);
REGISTER_MATCHER(usingDecl);
+ REGISTER_MATCHER(usingDirectiveDecl);
+ REGISTER_MATCHER(valueDecl);
REGISTER_MATCHER(varDecl);
REGISTER_MATCHER(variableArrayType);
+ REGISTER_MATCHER(voidType);
REGISTER_MATCHER(whileStmt);
REGISTER_MATCHER(withInitializer);
}
@@ -353,77 +376,63 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
return OS;
}
-struct ReverseSpecificityThenName {
- bool operator()(const std::pair<unsigned, std::string> &A,
- const std::pair<unsigned, std::string> &B) const {
- return A.first > B.first || (A.first == B.first && A.second < B.second);
- }
-};
-
-}
+} // namespace
-std::vector<MatcherCompletion> Registry::getCompletions(
- ArrayRef<std::pair<MatcherCtor, unsigned> > Context) {
+std::vector<ArgKind> Registry::getAcceptedCompletionTypes(
+ ArrayRef<std::pair<MatcherCtor, unsigned>> Context) {
ASTNodeKind InitialTypes[] = {
- ASTNodeKind::getFromNodeKind<Decl>(),
- ASTNodeKind::getFromNodeKind<QualType>(),
- ASTNodeKind::getFromNodeKind<Type>(),
- ASTNodeKind::getFromNodeKind<Stmt>(),
- ASTNodeKind::getFromNodeKind<NestedNameSpecifier>(),
- ASTNodeKind::getFromNodeKind<NestedNameSpecifierLoc>(),
- ASTNodeKind::getFromNodeKind<TypeLoc>()
- };
- ArrayRef<ASTNodeKind> InitialTypesRef(InitialTypes);
+ ASTNodeKind::getFromNodeKind<Decl>(),
+ ASTNodeKind::getFromNodeKind<QualType>(),
+ ASTNodeKind::getFromNodeKind<Type>(),
+ ASTNodeKind::getFromNodeKind<Stmt>(),
+ ASTNodeKind::getFromNodeKind<NestedNameSpecifier>(),
+ ASTNodeKind::getFromNodeKind<NestedNameSpecifierLoc>(),
+ ASTNodeKind::getFromNodeKind<TypeLoc>()};
// Starting with the above seed of acceptable top-level matcher types, compute
// the acceptable type set for the argument indicated by each context element.
- std::set<ASTNodeKind> TypeSet(InitialTypesRef.begin(), InitialTypesRef.end());
- for (ArrayRef<std::pair<MatcherCtor, unsigned> >::iterator
- CtxI = Context.begin(),
- CtxE = Context.end();
- CtxI != CtxE; ++CtxI) {
- std::vector<internal::ArgKind> NextTypeSet;
- for (std::set<ASTNodeKind>::iterator I = TypeSet.begin(), E = TypeSet.end();
- I != E; ++I) {
- if (CtxI->first->isConvertibleTo(*I) &&
- (CtxI->first->isVariadic() ||
- CtxI->second < CtxI->first->getNumArgs()))
- CtxI->first->getArgKinds(*I, CtxI->second, NextTypeSet);
+ std::set<ArgKind> TypeSet(std::begin(InitialTypes), std::end(InitialTypes));
+ for (const auto &CtxEntry : Context) {
+ MatcherCtor Ctor = CtxEntry.first;
+ unsigned ArgNumber = CtxEntry.second;
+ std::vector<ArgKind> NextTypeSet;
+ for (const ArgKind &Kind : TypeSet) {
+ if (Kind.getArgKind() == Kind.AK_Matcher &&
+ Ctor->isConvertibleTo(Kind.getMatcherKind()) &&
+ (Ctor->isVariadic() || ArgNumber < Ctor->getNumArgs()))
+ Ctor->getArgKinds(Kind.getMatcherKind(), ArgNumber, NextTypeSet);
}
TypeSet.clear();
- for (std::vector<internal::ArgKind>::iterator I = NextTypeSet.begin(),
- E = NextTypeSet.end();
- I != E; ++I) {
- if (I->getArgKind() == internal::ArgKind::AK_Matcher)
- TypeSet.insert(I->getMatcherKind());
- }
+ TypeSet.insert(NextTypeSet.begin(), NextTypeSet.end());
}
+ return std::vector<ArgKind>(TypeSet.begin(), TypeSet.end());
+}
- typedef std::map<std::pair<unsigned, std::string>, MatcherCompletion,
- ReverseSpecificityThenName> CompletionsTy;
- CompletionsTy Completions;
+std::vector<MatcherCompletion>
+Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
+ std::vector<MatcherCompletion> Completions;
- // TypeSet now contains the list of acceptable types for the argument we are
- // completing. Search the registry for acceptable matchers.
+ // Search the registry for acceptable matchers.
for (ConstructorMap::const_iterator I = RegistryData->constructors().begin(),
E = RegistryData->constructors().end();
I != E; ++I) {
std::set<ASTNodeKind> RetKinds;
unsigned NumArgs = I->second->isVariadic() ? 1 : I->second->getNumArgs();
bool IsPolymorphic = I->second->isPolymorphic();
- std::vector<std::vector<internal::ArgKind> > ArgsKinds(NumArgs);
+ std::vector<std::vector<ArgKind>> ArgsKinds(NumArgs);
unsigned MaxSpecificity = 0;
- for (std::set<ASTNodeKind>::iterator TI = TypeSet.begin(),
- TE = TypeSet.end();
- TI != TE; ++TI) {
+ for (const ArgKind& Kind : AcceptedTypes) {
+ if (Kind.getArgKind() != Kind.AK_Matcher)
+ continue;
unsigned Specificity;
ASTNodeKind LeastDerivedKind;
- if (I->second->isConvertibleTo(*TI, &Specificity, &LeastDerivedKind)) {
+ if (I->second->isConvertibleTo(Kind.getMatcherKind(), &Specificity,
+ &LeastDerivedKind)) {
if (MaxSpecificity < Specificity)
MaxSpecificity = Specificity;
RetKinds.insert(LeastDerivedKind);
for (unsigned Arg = 0; Arg != NumArgs; ++Arg)
- I->second->getArgKinds(*TI, Arg, ArgsKinds[Arg]);
+ I->second->getArgKinds(Kind.getMatcherKind(), Arg, ArgsKinds[Arg]);
if (IsPolymorphic)
break;
}
@@ -437,24 +446,25 @@ std::vector<MatcherCompletion> Registry::getCompletions(
OS << "Matcher<T> " << I->first() << "(Matcher<T>";
} else {
OS << "Matcher<" << RetKinds << "> " << I->first() << "(";
- for (std::vector<std::vector<internal::ArgKind> >::iterator
- KI = ArgsKinds.begin(),
- KE = ArgsKinds.end();
- KI != KE; ++KI) {
- if (KI != ArgsKinds.begin())
+ for (const std::vector<ArgKind> &Arg : ArgsKinds) {
+ if (&Arg != &ArgsKinds[0])
OS << ", ";
- // This currently assumes that a matcher may not overload a
- // non-matcher, and all non-matcher overloads have identical
- // arguments.
- if ((*KI)[0].getArgKind() == internal::ArgKind::AK_Matcher) {
- std::set<ASTNodeKind> MatcherKinds;
- std::transform(
- KI->begin(), KI->end(),
- std::inserter(MatcherKinds, MatcherKinds.end()),
- std::mem_fun_ref(&internal::ArgKind::getMatcherKind));
+
+ bool FirstArgKind = true;
+ std::set<ASTNodeKind> MatcherKinds;
+ // Two steps. First all non-matchers, then matchers only.
+ for (const ArgKind &AK : Arg) {
+ if (AK.getArgKind() == ArgKind::AK_Matcher) {
+ MatcherKinds.insert(AK.getMatcherKind());
+ } else {
+ if (!FirstArgKind) OS << "|";
+ FirstArgKind = false;
+ OS << AK.asString();
+ }
+ }
+ if (!MatcherKinds.empty()) {
+ if (!FirstArgKind) OS << "|";
OS << "Matcher<" << MatcherKinds << ">";
- } else {
- OS << (*KI)[0].asString();
}
}
}
@@ -466,19 +476,14 @@ std::vector<MatcherCompletion> Registry::getCompletions(
TypedText += "(";
if (ArgsKinds.empty())
TypedText += ")";
- else if (ArgsKinds[0][0].getArgKind() == internal::ArgKind::AK_String)
+ else if (ArgsKinds[0][0].getArgKind() == ArgKind::AK_String)
TypedText += "\"";
- Completions[std::make_pair(MaxSpecificity, I->first())] =
- MatcherCompletion(TypedText, OS.str());
+ Completions.emplace_back(TypedText, OS.str(), MaxSpecificity);
}
}
- std::vector<MatcherCompletion> RetVal;
- for (CompletionsTy::iterator I = Completions.begin(), E = Completions.end();
- I != E; ++I)
- RetVal.push_back(I->second);
- return RetVal;
+ return Completions;
}
// static
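
Registry::getCompletions is split in two: getAcceptedCompletionTypes computes the set of ArgKinds acceptable at the current position from the (MatcherCtor, argument-index) context, and getMatcherCompletions turns that set into completion entries, with sorting now done by Parser::completeExpression. A hedged sketch of driving the split API directly; topLevelCompletions is an invented helper, and the empty context means "completing a whole matcher expression", so the seed node kinds (Decl, Stmt, QualType, ...) apply:

    #include "clang/ASTMatchers/Dynamic/Registry.h"
    #include <vector>

    using namespace clang::ast_matchers::dynamic;

    std::vector<MatcherCompletion> topLevelCompletions() {
      std::vector<ArgKind> Accepted = Registry::getAcceptedCompletionTypes({});
      return Registry::getMatcherCompletions(Accepted);
    }
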
diff --git a/lib/ASTMatchers/Dynamic/VariantValue.cpp b/lib/ASTMatchers/Dynamic/VariantValue.cpp
index 18c989432f46..a88b70701234 100644
--- a/lib/ASTMatchers/Dynamic/VariantValue.cpp
+++ b/lib/ASTMatchers/Dynamic/VariantValue.cpp
@@ -20,26 +20,88 @@ namespace clang {
namespace ast_matchers {
namespace dynamic {
-VariantMatcher::MatcherOps::~MatcherOps() {}
+std::string ArgKind::asString() const {
+ switch (getArgKind()) {
+ case AK_Matcher:
+ return (Twine("Matcher<") + MatcherKind.asStringRef() + ">").str();
+ case AK_Unsigned:
+ return "unsigned";
+ case AK_String:
+ return "string";
+ }
+ llvm_unreachable("unhandled ArgKind");
+}
+
+bool ArgKind::isConvertibleTo(ArgKind To, unsigned *Specificity) const {
+ if (K != To.K)
+ return false;
+ if (K != AK_Matcher) {
+ if (Specificity)
+ *Specificity = 1;
+ return true;
+ }
+ unsigned Distance;
+ if (!MatcherKind.isBaseOf(To.MatcherKind, &Distance))
+ return false;
+
+ if (Specificity)
+ *Specificity = 100 - Distance;
+ return true;
+}
+
+bool
+VariantMatcher::MatcherOps::canConstructFrom(const DynTypedMatcher &Matcher,
+ bool &IsExactMatch) const {
+ IsExactMatch = Matcher.getSupportedKind().isSame(NodeKind);
+ return Matcher.canConvertTo(NodeKind);
+}
+
+llvm::Optional<DynTypedMatcher>
+VariantMatcher::MatcherOps::constructVariadicOperator(
+ DynTypedMatcher::VariadicOperator Op,
+ ArrayRef<VariantMatcher> InnerMatchers) const {
+ std::vector<DynTypedMatcher> DynMatchers;
+ for (const auto &InnerMatcher : InnerMatchers) {
+ // Abort if any of the inner matchers can't be converted to
+ // Matcher<T>.
+ if (!InnerMatcher.Value)
+ return llvm::None;
+ llvm::Optional<DynTypedMatcher> Inner =
+ InnerMatcher.Value->getTypedMatcher(*this);
+ if (!Inner)
+ return llvm::None;
+ DynMatchers.push_back(*Inner);
+ }
+ return DynTypedMatcher::constructVariadic(Op, DynMatchers);
+}
+
VariantMatcher::Payload::~Payload() {}
class VariantMatcher::SinglePayload : public VariantMatcher::Payload {
public:
SinglePayload(const DynTypedMatcher &Matcher) : Matcher(Matcher) {}
- virtual llvm::Optional<DynTypedMatcher> getSingleMatcher() const {
+ llvm::Optional<DynTypedMatcher> getSingleMatcher() const override {
return Matcher;
}
- virtual std::string getTypeAsString() const {
+ std::string getTypeAsString() const override {
return (Twine("Matcher<") + Matcher.getSupportedKind().asStringRef() + ">")
.str();
}
- virtual void makeTypedMatcher(MatcherOps &Ops) const {
+ llvm::Optional<DynTypedMatcher>
+ getTypedMatcher(const MatcherOps &Ops) const override {
bool Ignore;
if (Ops.canConstructFrom(Matcher, Ignore))
- Ops.constructFrom(Matcher);
+ return Matcher;
+ return llvm::None;
+ }
+
+ bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
+ unsigned *Specificity) const override {
+ return ArgKind(Matcher.getSupportedKind())
+ .isConvertibleTo(Kind, Specificity);
}
private:
@@ -51,15 +113,15 @@ public:
PolymorphicPayload(std::vector<DynTypedMatcher> MatchersIn)
: Matchers(std::move(MatchersIn)) {}
- virtual ~PolymorphicPayload() {}
+ ~PolymorphicPayload() override {}
- virtual llvm::Optional<DynTypedMatcher> getSingleMatcher() const {
+ llvm::Optional<DynTypedMatcher> getSingleMatcher() const override {
if (Matchers.size() != 1)
return llvm::Optional<DynTypedMatcher>();
return Matchers[0];
}
- virtual std::string getTypeAsString() const {
+ std::string getTypeAsString() const override {
std::string Inner;
for (size_t i = 0, e = Matchers.size(); i != e; ++i) {
if (i != 0)
@@ -69,7 +131,8 @@ public:
return (Twine("Matcher<") + Inner + ">").str();
}
- virtual void makeTypedMatcher(MatcherOps &Ops) const {
+ llvm::Optional<DynTypedMatcher>
+ getTypedMatcher(const MatcherOps &Ops) const override {
bool FoundIsExact = false;
const DynTypedMatcher *Found = nullptr;
int NumFound = 0;
@@ -89,7 +152,23 @@ public:
}
// We only succeed if we found exactly one, or if we found an exact match.
if (Found && (FoundIsExact || NumFound == 1))
- Ops.constructFrom(*Found);
+ return *Found;
+ return llvm::None;
+ }
+
+ bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
+ unsigned *Specificity) const override {
+ unsigned MaxSpecificity = 0;
+ for (const DynTypedMatcher &Matcher : Matchers) {
+ unsigned ThisSpecificity;
+ if (ArgKind(Matcher.getSupportedKind())
+ .isConvertibleTo(Kind, &ThisSpecificity)) {
+ MaxSpecificity = std::max(MaxSpecificity, ThisSpecificity);
+ }
+ }
+ if (Specificity)
+ *Specificity = MaxSpecificity;
+ return MaxSpecificity > 0;
}
const std::vector<DynTypedMatcher> Matchers;
@@ -97,15 +176,15 @@ public:
class VariantMatcher::VariadicOpPayload : public VariantMatcher::Payload {
public:
- VariadicOpPayload(ast_matchers::internal::VariadicOperatorFunction Func,
+ VariadicOpPayload(DynTypedMatcher::VariadicOperator Op,
std::vector<VariantMatcher> Args)
- : Func(Func), Args(std::move(Args)) {}
+ : Op(Op), Args(std::move(Args)) {}
- virtual llvm::Optional<DynTypedMatcher> getSingleMatcher() const {
+ llvm::Optional<DynTypedMatcher> getSingleMatcher() const override {
return llvm::Optional<DynTypedMatcher>();
}
- virtual std::string getTypeAsString() const {
+ std::string getTypeAsString() const override {
std::string Inner;
for (size_t i = 0, e = Args.size(); i != e; ++i) {
if (i != 0)
@@ -115,12 +194,22 @@ public:
return Inner;
}
- virtual void makeTypedMatcher(MatcherOps &Ops) const {
- Ops.constructVariadicOperator(Func, Args);
+ llvm::Optional<DynTypedMatcher>
+ getTypedMatcher(const MatcherOps &Ops) const override {
+ return Ops.constructVariadicOperator(Op, Args);
+ }
+
+ bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
+ unsigned *Specificity) const override {
+ for (const VariantMatcher &Matcher : Args) {
+ if (!Matcher.isConvertibleTo(Kind, Specificity))
+ return false;
+ }
+ return true;
}
private:
- const ast_matchers::internal::VariadicOperatorFunction Func;
+ const DynTypedMatcher::VariadicOperator Op;
const std::vector<VariantMatcher> Args;
};
@@ -136,9 +225,9 @@ VariantMatcher::PolymorphicMatcher(std::vector<DynTypedMatcher> Matchers) {
}
VariantMatcher VariantMatcher::VariadicOperatorMatcher(
- ast_matchers::internal::VariadicOperatorFunction Func,
+ DynTypedMatcher::VariadicOperator Op,
std::vector<VariantMatcher> Args) {
- return VariantMatcher(new VariadicOpPayload(Func, std::move(Args)));
+ return VariantMatcher(new VariadicOpPayload(Op, std::move(Args)));
}
llvm::Optional<DynTypedMatcher> VariantMatcher::getSingleMatcher() const {
@@ -251,6 +340,43 @@ void VariantValue::setMatcher(const VariantMatcher &NewValue) {
Value.Matcher = new VariantMatcher(NewValue);
}
+bool VariantValue::isConvertibleTo(ArgKind Kind, unsigned *Specificity) const {
+ switch (Kind.getArgKind()) {
+ case ArgKind::AK_Unsigned:
+ if (!isUnsigned())
+ return false;
+ *Specificity = 1;
+ return true;
+
+ case ArgKind::AK_String:
+ if (!isString())
+ return false;
+ *Specificity = 1;
+ return true;
+
+ case ArgKind::AK_Matcher:
+ if (!isMatcher())
+ return false;
+ return getMatcher().isConvertibleTo(Kind.getMatcherKind(), Specificity);
+ }
+ llvm_unreachable("Invalid Type");
+}
+
+bool VariantValue::isConvertibleTo(ArrayRef<ArgKind> Kinds,
+ unsigned *Specificity) const {
+ unsigned MaxSpecificity = 0;
+ for (const ArgKind& Kind : Kinds) {
+ unsigned ThisSpecificity;
+ if (!isConvertibleTo(Kind, &ThisSpecificity))
+ continue;
+ MaxSpecificity = std::max(MaxSpecificity, ThisSpecificity);
+ }
+ if (Specificity && MaxSpecificity > 0) {
+ *Specificity = MaxSpecificity;
+ }
+ return MaxSpecificity > 0;
+}
+
std::string VariantValue::getTypeAsString() const {
switch (Type) {
case VT_String: return "String";
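
ArgKind and the isConvertibleTo machinery moved here so completion ranking can score conversions: matcher-to-matcher conversions score 100 minus the derivation distance between node kinds, any other matching kind scores 1, and VariantValue keeps the maximum over all accepted kinds. A short sketch using the new API; demoStringConversion is an invented name:

    #include "clang/ASTMatchers/Dynamic/VariantValue.h"

    using namespace clang::ast_matchers::dynamic;

    bool demoStringConversion() {
      VariantValue V("attr::NoReturn");   // a string-valued VariantValue
      unsigned Specificity = 0;
      bool Ok = V.isConvertibleTo(ArgKind(ArgKind::AK_String), &Specificity);
      // Ok is true and Specificity is 1: non-matcher conversions always
      // score 1, as in VariantValue::isConvertibleTo above.
      return Ok && Specificity == 1;
    }
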
diff --git a/lib/Analysis/AnalysisDeclContext.cpp b/lib/Analysis/AnalysisDeclContext.cpp
index 90d4b13b88be..be66f32e77be 100644
--- a/lib/Analysis/AnalysisDeclContext.cpp
+++ b/lib/Analysis/AnalysisDeclContext.cpp
@@ -69,8 +69,9 @@ AnalysisDeclContextManager::AnalysisDeclContextManager(bool useUnoptimizedCFG,
bool addTemporaryDtors,
bool synthesizeBodies,
bool addStaticInitBranch,
- bool addCXXNewAllocator)
- : SynthesizeBodies(synthesizeBodies)
+ bool addCXXNewAllocator,
+ CodeInjector *injector)
+ : Injector(injector), SynthesizeBodies(synthesizeBodies)
{
cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG;
cfgBuildOptions.AddImplicitDtors = addImplicitDtors;
@@ -84,8 +85,8 @@ void AnalysisDeclContextManager::clear() {
llvm::DeleteContainerSeconds(Contexts);
}
-static BodyFarm &getBodyFarm(ASTContext &C) {
- static BodyFarm *BF = new BodyFarm(C);
+static BodyFarm &getBodyFarm(ASTContext &C, CodeInjector *injector = nullptr) {
+ static BodyFarm *BF = new BodyFarm(C, injector);
return *BF;
}
@@ -94,7 +95,7 @@ Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
Stmt *Body = FD->getBody();
if (!Body && Manager && Manager->synthesizeBodies()) {
- Body = getBodyFarm(getASTContext()).getBody(FD);
+ Body = getBodyFarm(getASTContext(), Manager->Injector.get()).getBody(FD);
if (Body)
IsAutosynthesized = true;
}
@@ -103,7 +104,7 @@ Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
Stmt *Body = MD->getBody();
if (!Body && Manager && Manager->synthesizeBodies()) {
- Body = getBodyFarm(getASTContext()).getBody(MD);
+ Body = getBodyFarm(getASTContext(), Manager->Injector.get()).getBody(MD);
if (Body)
IsAutosynthesized = true;
}
@@ -128,6 +129,13 @@ bool AnalysisDeclContext::isBodyAutosynthesized() const {
return Tmp;
}
+bool AnalysisDeclContext::isBodyAutosynthesizedFromModelFile() const {
+ bool Tmp;
+ Stmt *Body = getBody(Tmp);
+ return Tmp && Body->getLocStart().isValid();
+}
+
+
const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
return MD->getSelfDecl();
@@ -181,8 +189,7 @@ CFG *AnalysisDeclContext::getCFG() {
return getUnoptimizedCFG();
if (!builtCFG) {
- cfg.reset(CFG::buildCFG(D, getBody(),
- &D->getASTContext(), cfgBuildOptions));
+ cfg = CFG::buildCFG(D, getBody(), &D->getASTContext(), cfgBuildOptions);
// Even when the cfg is not successfully built, we don't
// want to try building it again.
builtCFG = true;
@@ -200,8 +207,8 @@ CFG *AnalysisDeclContext::getUnoptimizedCFG() {
if (!builtCompleteCFG) {
SaveAndRestore<bool> NotPrune(cfgBuildOptions.PruneTriviallyFalseEdges,
false);
- completeCFG.reset(CFG::buildCFG(D, getBody(), &D->getASTContext(),
- cfgBuildOptions));
+ completeCFG =
+ CFG::buildCFG(D, getBody(), &D->getASTContext(), cfgBuildOptions);
// Even when the cfg is not successfully built, we don't
// want to try building it again.
builtCompleteCFG = true;
@@ -474,7 +481,7 @@ public:
// Non-local variables are also directly modified.
if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
if (!VD->hasLocalStorage()) {
- if (Visited.insert(VD))
+ if (Visited.insert(VD).second)
BEVals.push_back(VD, BC);
}
}
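
AnalysisDeclContext now assigns the result of CFG::buildCFG directly because buildCFG returns std::unique_ptr<CFG> instead of a raw pointer (see the CFG.cpp hunk further down). A minimal sketch of the new ownership contract; buildFor is an invented wrapper:

    #include "clang/Analysis/CFG.h"
    #include <memory>

    std::unique_ptr<clang::CFG> buildFor(const clang::Decl *D, clang::Stmt *Body,
                                         clang::ASTContext &Ctx) {
      clang::CFG::BuildOptions Options;   // default build options
      return clang::CFG::buildCFG(D, Body, &Ctx, Options);
    }
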
diff --git a/lib/Analysis/BodyFarm.cpp b/lib/Analysis/BodyFarm.cpp
index 316a18b421b5..7d1b23575293 100644
--- a/lib/Analysis/BodyFarm.cpp
+++ b/lib/Analysis/BodyFarm.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/CodeInjector.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
@@ -114,7 +115,7 @@ DeclRefExpr *ASTMaker::makeDeclRefExpr(const VarDecl *D) {
/* QualifierLoc = */ NestedNameSpecifierLoc(),
/* TemplateKWLoc = */ SourceLocation(),
/* D = */ const_cast<VarDecl*>(D),
- /* isEnclosingLocal = */ false,
+ /* RefersToEnclosingVariableOrCapture = */ false,
/* NameLoc = */ SourceLocation(),
/* T = */ D->getType(),
/* VK = */ VK_LValue);
@@ -223,10 +224,8 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
PredicateTy);
// (3) Create the compound statement.
- Stmt *Stmts[2];
- Stmts[0] = B;
- Stmts[1] = CE;
- CompoundStmt *CS = M.makeCompound(ArrayRef<Stmt*>(Stmts, 2));
+ Stmt *Stmts[] = { B, CE };
+ CompoundStmt *CS = M.makeCompound(Stmts);
// (4) Create the 'if' condition.
ImplicitCastExpr *LValToRval =
@@ -337,7 +336,7 @@ static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D)
Expr *RetVal = isBoolean ? M.makeIntegralCastToBoolean(BoolVal)
: M.makeIntegralCast(BoolVal, ResultTy);
Stmts[1] = M.makeReturn(RetVal);
- CompoundStmt *Body = M.makeCompound(ArrayRef<Stmt*>(Stmts, 2));
+ CompoundStmt *Body = M.makeCompound(Stmts);
// Construct the else clause.
BoolVal = M.makeObjCBool(false);
@@ -383,6 +382,7 @@ Stmt *BodyFarm::getBody(const FunctionDecl *D) {
}
if (FF) { Val = FF(C, D); }
+ else if (Injector) { Val = Injector->getBody(D); }
return Val.getValue();
}
diff --git a/lib/Analysis/BodyFarm.h b/lib/Analysis/BodyFarm.h
index 2d200fb755c1..91379437231d 100644
--- a/lib/Analysis/BodyFarm.h
+++ b/lib/Analysis/BodyFarm.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_BODYFARM_H
-#define LLVM_CLANG_ANALYSIS_BODYFARM_H
+#ifndef LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
+#define LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
@@ -27,10 +27,11 @@ class FunctionDecl;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class Stmt;
+class CodeInjector;
class BodyFarm {
public:
- BodyFarm(ASTContext &C) : C(C) {}
+ BodyFarm(ASTContext &C, CodeInjector *injector) : C(C), Injector(injector) {}
/// Factory method for creating bodies for ordinary functions.
Stmt *getBody(const FunctionDecl *D);
@@ -43,6 +44,7 @@ private:
ASTContext &C;
BodyMap Bodies;
+ CodeInjector *Injector;
};
}
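
BodyFarm now falls back to an optional CodeInjector when it has no hard-coded body, which is the hook the analyzer's model-file support plugs into. A hypothetical no-op injector, assuming clang::CodeInjector exposes virtual getBody overloads for FunctionDecl and ObjCMethodDecl (only the FunctionDecl path is visible in this diff); NullInjector is an invented name:

    #include "clang/Analysis/CodeInjector.h"

    class NullInjector : public clang::CodeInjector {
    public:
      // Returning null means "nothing to inject"; BodyFarm::getBody then
      // reports no synthesized body for the declaration.
      clang::Stmt *getBody(const clang::FunctionDecl *) override { return nullptr; }
      clang::Stmt *getBody(const clang::ObjCMethodDecl *) override { return nullptr; }
    };
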
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 842a385fbcdb..d9073aa63b16 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -234,6 +234,12 @@ public:
}
};
+TryResult bothKnownTrue(TryResult R1, TryResult R2) {
+ if (!R1.isKnown() || !R2.isKnown())
+ return TryResult();
+ return TryResult(R1.isTrue() && R2.isTrue());
+}
+
class reverse_children {
llvm::SmallVector<Stmt *, 12> childrenBuf;
ArrayRef<Stmt*> children;
@@ -300,7 +306,7 @@ class CFGBuilder {
CFGBlock *SwitchTerminatedBlock;
CFGBlock *DefaultCaseBlock;
CFGBlock *TryTerminatedBlock;
-
+
// Current position in local scope.
LocalScope::const_iterator ScopePos;
@@ -343,7 +349,7 @@ public:
cachedEntry(nullptr), lastLookup(nullptr) {}
// buildCFG - Used by external clients to construct the CFG.
- CFG* buildCFG(const Decl *D, Stmt *Statement);
+ std::unique_ptr<CFG> buildCFG(const Decl *D, Stmt *Statement);
bool alwaysAdd(const Stmt *stmt);
@@ -410,16 +416,80 @@ private:
CFGBlock *VisitChildren(Stmt *S);
CFGBlock *VisitNoRecurse(Expr *E, AddStmtChoice asc);
+ /// When creating the CFG for temporary destructors, we want to mirror the
+ /// branch structure of the corresponding constructor calls.
+ /// Thus, while visiting a statement for temporary destructors, we keep a
+ /// context to keep track of the following information:
+ /// - whether a subexpression is executed unconditionally
+ /// - if a subexpression is executed conditionally, the first
+ /// CXXBindTemporaryExpr we encounter in that subexpression (which
+ /// corresponds to the last temporary destructor we have to call for this
+ /// subexpression) and the CFG block at that point (which will become the
+ /// successor block when inserting the decision point).
+ ///
+ /// That way, we can build the branch structure for temporary destructors as
+ /// follows:
+ /// 1. If a subexpression is executed unconditionally, we add the temporary
+ /// destructor calls to the current block.
+ /// 2. If a subexpression is executed conditionally, when we encounter a
+ /// CXXBindTemporaryExpr:
+ /// a) If it is the first temporary destructor call in the subexpression,
+ /// we remember the CXXBindTemporaryExpr and the current block in the
+ /// TempDtorContext; we start a new block, and insert the temporary
+ /// destructor call.
+ /// b) Otherwise, add the temporary destructor call to the current block.
+ /// 3. When we finished visiting a conditionally executed subexpression,
+ /// and we found at least one temporary constructor during the visitation
+ /// (2.a has executed), we insert a decision block that uses the
+ /// CXXBindTemporaryExpr as terminator, and branches to the current block
+ /// if the CXXBindTemporaryExpr was marked executed, and otherwise
+ /// branches to the stored successor.
+ struct TempDtorContext {
+ TempDtorContext()
+ : IsConditional(false), KnownExecuted(true), Succ(nullptr),
+ TerminatorExpr(nullptr) {}
+
+ TempDtorContext(TryResult KnownExecuted)
+ : IsConditional(true), KnownExecuted(KnownExecuted), Succ(nullptr),
+ TerminatorExpr(nullptr) {}
+
+ /// Returns whether we need to start a new branch for a temporary destructor
+  /// call. This is the case when the temporary destructor is
+ /// conditionally executed, and it is the first one we encounter while
+ /// visiting a subexpression - other temporary destructors at the same level
+ /// will be added to the same block and are executed under the same
+ /// condition.
+ bool needsTempDtorBranch() const {
+ return IsConditional && !TerminatorExpr;
+ }
+
+ /// Remember the successor S of a temporary destructor decision branch for
+ /// the corresponding CXXBindTemporaryExpr E.
+ void setDecisionPoint(CFGBlock *S, CXXBindTemporaryExpr *E) {
+ Succ = S;
+ TerminatorExpr = E;
+ }
+
+ const bool IsConditional;
+ const TryResult KnownExecuted;
+ CFGBlock *Succ;
+ CXXBindTemporaryExpr *TerminatorExpr;
+ };
+
// Visitors to walk an AST and generate destructors of temporaries in
// full expression.
- CFGBlock *VisitForTemporaryDtors(Stmt *E, bool BindToTemporary = false);
- CFGBlock *VisitChildrenForTemporaryDtors(Stmt *E);
- CFGBlock *VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E);
- CFGBlock *VisitCXXBindTemporaryExprForTemporaryDtors(CXXBindTemporaryExpr *E,
- bool BindToTemporary);
- CFGBlock *
- VisitConditionalOperatorForTemporaryDtors(AbstractConditionalOperator *E,
- bool BindToTemporary);
+ CFGBlock *VisitForTemporaryDtors(Stmt *E, bool BindToTemporary,
+ TempDtorContext &Context);
+ CFGBlock *VisitChildrenForTemporaryDtors(Stmt *E, TempDtorContext &Context);
+ CFGBlock *VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E,
+ TempDtorContext &Context);
+ CFGBlock *VisitCXXBindTemporaryExprForTemporaryDtors(
+ CXXBindTemporaryExpr *E, bool BindToTemporary, TempDtorContext &Context);
+ CFGBlock *VisitConditionalOperatorForTemporaryDtors(
+ AbstractConditionalOperator *E, bool BindToTemporary,
+ TempDtorContext &Context);
+ void InsertTempDtorDecisionBlock(const TempDtorContext &Context,
+ CFGBlock *FalseSucc = nullptr);
// NYS == Not Yet Supported
CFGBlock *NYS() {
@@ -901,7 +971,7 @@ static const VariableArrayType *FindVA(const Type *t) {
/// body (compound statement). The ownership of the returned CFG is
/// transferred to the caller. If CFG construction fails, this method returns
/// NULL.
-CFG* CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
+std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
assert(cfg.get());
if (!Statement)
return nullptr;
@@ -973,7 +1043,7 @@ CFG* CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
// Create an empty entry block that has no predecessors.
cfg->setEntry(createBlock());
- return cfg.release();
+ return std::move(cfg);
}
/// createBlock - Used to lazily create blocks that are connected
@@ -1000,21 +1070,19 @@ CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
if (!BuildOpts.AddInitializers)
return Block;
- bool IsReference = false;
bool HasTemporaries = false;
// Destructors of temporaries in initialization expression should be called
// after initialization finishes.
Expr *Init = I->getInit();
if (Init) {
- if (FieldDecl *FD = I->getAnyMember())
- IsReference = FD->getType()->isReferenceType();
HasTemporaries = isa<ExprWithCleanups>(Init);
if (BuildOpts.AddTemporaryDtors && HasTemporaries) {
// Generate destructors for temporaries in initialization expression.
+ TempDtorContext Context;
VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
- IsReference);
+ /*BindToTemporary=*/false, Context);
}
}
@@ -1946,7 +2014,6 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
return Block;
}
- bool IsReference = false;
bool HasTemporaries = false;
// Guard static initializers under a branch.
@@ -1968,13 +2035,13 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
// after initialization finishes.
Expr *Init = VD->getInit();
if (Init) {
- IsReference = VD->getType()->isReferenceType();
HasTemporaries = isa<ExprWithCleanups>(Init);
if (BuildOpts.AddTemporaryDtors && HasTemporaries) {
// Generate destructors for temporaries in initialization expression.
+ TempDtorContext Context;
VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
- IsReference);
+ /*BindToTemporary=*/false, Context);
}
}
@@ -3354,7 +3421,8 @@ CFGBlock *CFGBuilder::VisitExprWithCleanups(ExprWithCleanups *E,
if (BuildOpts.AddTemporaryDtors) {
// If adding implicit destructors visit the full expression for adding
// destructors of temporaries.
- VisitForTemporaryDtors(E->getSubExpr());
+ TempDtorContext Context;
+ VisitForTemporaryDtors(E->getSubExpr(), false, Context);
// Full expression has to be added as CFGStmt so it will be sequenced
// before destructors of its temporaries.
@@ -3463,7 +3531,8 @@ CFGBlock *CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt *I) {
return addStmt(I->getTarget());
}
-CFGBlock *CFGBuilder::VisitForTemporaryDtors(Stmt *E, bool BindToTemporary) {
+CFGBlock *CFGBuilder::VisitForTemporaryDtors(Stmt *E, bool BindToTemporary,
+ TempDtorContext &Context) {
assert(BuildOpts.AddImplicitDtors && BuildOpts.AddTemporaryDtors);
tryAgain:
@@ -3473,32 +3542,52 @@ tryAgain:
}
switch (E->getStmtClass()) {
default:
- return VisitChildrenForTemporaryDtors(E);
+ return VisitChildrenForTemporaryDtors(E, Context);
case Stmt::BinaryOperatorClass:
- return VisitBinaryOperatorForTemporaryDtors(cast<BinaryOperator>(E));
+ return VisitBinaryOperatorForTemporaryDtors(cast<BinaryOperator>(E),
+ Context);
case Stmt::CXXBindTemporaryExprClass:
return VisitCXXBindTemporaryExprForTemporaryDtors(
- cast<CXXBindTemporaryExpr>(E), BindToTemporary);
+ cast<CXXBindTemporaryExpr>(E), BindToTemporary, Context);
case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass:
return VisitConditionalOperatorForTemporaryDtors(
- cast<AbstractConditionalOperator>(E), BindToTemporary);
+ cast<AbstractConditionalOperator>(E), BindToTemporary, Context);
case Stmt::ImplicitCastExprClass:
// For implicit cast we want BindToTemporary to be passed further.
E = cast<CastExpr>(E)->getSubExpr();
goto tryAgain;
+ case Stmt::CXXFunctionalCastExprClass:
+ // For functional cast we want BindToTemporary to be passed further.
+ E = cast<CXXFunctionalCastExpr>(E)->getSubExpr();
+ goto tryAgain;
+
case Stmt::ParenExprClass:
E = cast<ParenExpr>(E)->getSubExpr();
goto tryAgain;
- case Stmt::MaterializeTemporaryExprClass:
- E = cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr();
+ case Stmt::MaterializeTemporaryExprClass: {
+ const MaterializeTemporaryExpr* MTE = cast<MaterializeTemporaryExpr>(E);
+ BindToTemporary = (MTE->getStorageDuration() != SD_FullExpression);
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ // Find the expression whose lifetime needs to be extended.
+ E = const_cast<Expr *>(
+ cast<MaterializeTemporaryExpr>(E)
+ ->GetTemporaryExpr()
+ ->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments));
+ // Visit the skipped comma operator left-hand sides for other temporaries.
+ for (const Expr *CommaLHS : CommaLHSs) {
+ VisitForTemporaryDtors(const_cast<Expr *>(CommaLHS),
+ /*BindToTemporary=*/false, Context);
+ }
goto tryAgain;
+ }
case Stmt::BlockExprClass:
// Don't recurse into blocks; their subexpressions don't get evaluated
@@ -3511,7 +3600,8 @@ tryAgain:
auto *LE = cast<LambdaExpr>(E);
CFGBlock *B = Block;
for (Expr *Init : LE->capture_inits()) {
- if (CFGBlock *R = VisitForTemporaryDtors(Init))
+ if (CFGBlock *R = VisitForTemporaryDtors(
+ Init, /*BindToTemporary=*/false, Context))
B = R;
}
return B;
@@ -3527,7 +3617,13 @@ tryAgain:
}
}
-CFGBlock *CFGBuilder::VisitChildrenForTemporaryDtors(Stmt *E) {
+CFGBlock *CFGBuilder::VisitChildrenForTemporaryDtors(Stmt *E,
+ TempDtorContext &Context) {
+ if (isa<LambdaExpr>(E)) {
+ // Do not visit the children of lambdas; they have their own CFGs.
+ return Block;
+ }
+
// When visiting children for destructors we want to visit them in reverse
// order that they will appear in the CFG. Because the CFG is built
// bottom-up, this means we visit them in their natural order, which
@@ -3535,165 +3631,126 @@ CFGBlock *CFGBuilder::VisitChildrenForTemporaryDtors(Stmt *E) {
CFGBlock *B = Block;
for (Stmt::child_range I = E->children(); I; ++I) {
if (Stmt *Child = *I)
- if (CFGBlock *R = VisitForTemporaryDtors(Child))
+ if (CFGBlock *R = VisitForTemporaryDtors(Child, false, Context))
B = R;
}
return B;
}
-CFGBlock *CFGBuilder::VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E) {
+CFGBlock *CFGBuilder::VisitBinaryOperatorForTemporaryDtors(
+ BinaryOperator *E, TempDtorContext &Context) {
if (E->isLogicalOp()) {
- // Destructors for temporaries in LHS expression should be called after
- // those for RHS expression. Even if this will unnecessarily create a block,
- // this block will be used at least by the full expression.
- autoCreateBlock();
- CFGBlock *ConfluenceBlock = VisitForTemporaryDtors(E->getLHS());
- if (badCFG)
- return nullptr;
-
- Succ = ConfluenceBlock;
- Block = nullptr;
- CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
-
- if (RHSBlock) {
- if (badCFG)
- return nullptr;
+ VisitForTemporaryDtors(E->getLHS(), false, Context);
+ TryResult RHSExecuted = tryEvaluateBool(E->getLHS());
+ if (RHSExecuted.isKnown() && E->getOpcode() == BO_LOr)
+ RHSExecuted.negate();
+
+ // We do not know at CFG-construction time whether the right-hand-side was
+ // executed, thus we add a branch node that depends on the temporary
+ // constructor call.
+ TempDtorContext RHSContext(
+ bothKnownTrue(Context.KnownExecuted, RHSExecuted));
+ VisitForTemporaryDtors(E->getRHS(), false, RHSContext);
+ InsertTempDtorDecisionBlock(RHSContext);
- // If RHS expression did produce destructors we need to connect created
- // blocks to CFG in same manner as for binary operator itself.
- CFGBlock *LHSBlock = createBlock(false);
- LHSBlock->setTerminator(CFGTerminator(E, true));
-
- // For binary operator LHS block is before RHS in list of predecessors
- // of ConfluenceBlock.
- std::reverse(ConfluenceBlock->pred_begin(),
- ConfluenceBlock->pred_end());
-
- // See if this is a known constant.
- TryResult KnownVal = tryEvaluateBool(E->getLHS());
- if (KnownVal.isKnown() && (E->getOpcode() == BO_LOr))
- KnownVal.negate();
-
- // Link LHSBlock with RHSBlock exactly the same way as for binary operator
- // itself.
- if (E->getOpcode() == BO_LOr) {
- addSuccessor(LHSBlock, KnownVal.isTrue() ? nullptr : ConfluenceBlock);
- addSuccessor(LHSBlock, KnownVal.isFalse() ? nullptr : RHSBlock);
- } else {
- assert (E->getOpcode() == BO_LAnd);
- addSuccessor(LHSBlock, KnownVal.isFalse() ? nullptr : RHSBlock);
- addSuccessor(LHSBlock, KnownVal.isTrue() ? nullptr : ConfluenceBlock);
- }
-
- Block = LHSBlock;
- return LHSBlock;
- }
-
- Block = ConfluenceBlock;
- return ConfluenceBlock;
+ return Block;
}
if (E->isAssignmentOp()) {
// For assignment operator (=) LHS expression is visited
// before RHS expression. For destructors visit them in reverse order.
- CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
- CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS());
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), false, Context);
+ CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS(), false, Context);
return LHSBlock ? LHSBlock : RHSBlock;
}
// For any other binary operator RHS expression is visited before
// LHS expression (order of children). For destructors visit them in reverse
// order.
- CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS());
- CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
+ CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS(), false, Context);
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), false, Context);
return RHSBlock ? RHSBlock : LHSBlock;
}
CFGBlock *CFGBuilder::VisitCXXBindTemporaryExprForTemporaryDtors(
- CXXBindTemporaryExpr *E, bool BindToTemporary) {
+ CXXBindTemporaryExpr *E, bool BindToTemporary, TempDtorContext &Context) {
// First add destructors for temporaries in subexpression.
- CFGBlock *B = VisitForTemporaryDtors(E->getSubExpr());
+ CFGBlock *B = VisitForTemporaryDtors(E->getSubExpr(), false, Context);
if (!BindToTemporary) {
// If lifetime of temporary is not prolonged (by assigning to constant
// reference) add destructor for it.
- // If the destructor is marked as a no-return destructor, we need to create
- // a new block for the destructor which does not have as a successor
- // anything built thus far. Control won't flow out of this block.
const CXXDestructorDecl *Dtor = E->getTemporary()->getDestructor();
+
if (Dtor->isNoReturn()) {
- Succ = B;
+ // If the destructor is marked as a no-return destructor, we need to
+ // create a new block for the destructor which does not have as a
+ // successor anything built thus far. Control won't flow out of this
+ // block.
+ if (B) Succ = B;
Block = createNoReturnBlock();
+ } else if (Context.needsTempDtorBranch()) {
+ // If we need to introduce a branch, we add a new block that we will hook
+ // up to a decision block later.
+ if (B) Succ = B;
+ Block = createBlock();
} else {
autoCreateBlock();
}
-
+ if (Context.needsTempDtorBranch()) {
+ Context.setDecisionPoint(Succ, E);
+ }
appendTemporaryDtor(Block, E);
+
B = Block;
}
return B;
}
-CFGBlock *CFGBuilder::VisitConditionalOperatorForTemporaryDtors(
- AbstractConditionalOperator *E, bool BindToTemporary) {
- // First add destructors for condition expression. Even if this will
- // unnecessarily create a block, this block will be used at least by the full
- // expression.
- autoCreateBlock();
- CFGBlock *ConfluenceBlock = VisitForTemporaryDtors(E->getCond());
- if (badCFG)
- return nullptr;
- if (BinaryConditionalOperator *BCO
- = dyn_cast<BinaryConditionalOperator>(E)) {
- ConfluenceBlock = VisitForTemporaryDtors(BCO->getCommon());
- if (badCFG)
- return nullptr;
- }
-
- // Try to add block with destructors for LHS expression.
- CFGBlock *LHSBlock = nullptr;
- Succ = ConfluenceBlock;
- Block = nullptr;
- LHSBlock = VisitForTemporaryDtors(E->getTrueExpr(), BindToTemporary);
- if (badCFG)
- return nullptr;
-
- // Try to add block with destructors for RHS expression;
- Succ = ConfluenceBlock;
- Block = nullptr;
- CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getFalseExpr(),
- BindToTemporary);
- if (badCFG)
- return nullptr;
-
- if (!RHSBlock && !LHSBlock) {
- // If neither LHS nor RHS expression had temporaries to destroy don't create
- // more blocks.
- Block = ConfluenceBlock;
- return Block;
+void CFGBuilder::InsertTempDtorDecisionBlock(const TempDtorContext &Context,
+ CFGBlock *FalseSucc) {
+ if (!Context.TerminatorExpr) {
+ // If no temporary was found, we do not need to insert a decision point.
+ return;
}
+ assert(Context.TerminatorExpr);
+ CFGBlock *Decision = createBlock(false);
+ Decision->setTerminator(CFGTerminator(Context.TerminatorExpr, true));
+ addSuccessor(Decision, Block, !Context.KnownExecuted.isFalse());
+ addSuccessor(Decision, FalseSucc ? FalseSucc : Context.Succ,
+ !Context.KnownExecuted.isTrue());
+ Block = Decision;
+}
- Block = createBlock(false);
- Block->setTerminator(CFGTerminator(E, true));
- assert(Block->getTerminator().isTemporaryDtorsBranch());
-
- // See if this is a known constant.
- const TryResult &KnownVal = tryEvaluateBool(E->getCond());
-
- if (LHSBlock) {
- addSuccessor(Block, LHSBlock, !KnownVal.isFalse());
- } else if (KnownVal.isFalse()) {
- addSuccessor(Block, nullptr);
+CFGBlock *CFGBuilder::VisitConditionalOperatorForTemporaryDtors(
+ AbstractConditionalOperator *E, bool BindToTemporary,
+ TempDtorContext &Context) {
+ VisitForTemporaryDtors(E->getCond(), false, Context);
+ CFGBlock *ConditionBlock = Block;
+ CFGBlock *ConditionSucc = Succ;
+ TryResult ConditionVal = tryEvaluateBool(E->getCond());
+ TryResult NegatedVal = ConditionVal;
+ if (NegatedVal.isKnown()) NegatedVal.negate();
+
+ TempDtorContext TrueContext(
+ bothKnownTrue(Context.KnownExecuted, ConditionVal));
+ VisitForTemporaryDtors(E->getTrueExpr(), BindToTemporary, TrueContext);
+ CFGBlock *TrueBlock = Block;
+
+ Block = ConditionBlock;
+ Succ = ConditionSucc;
+ TempDtorContext FalseContext(
+ bothKnownTrue(Context.KnownExecuted, NegatedVal));
+ VisitForTemporaryDtors(E->getFalseExpr(), BindToTemporary, FalseContext);
+
+ if (TrueContext.TerminatorExpr && FalseContext.TerminatorExpr) {
+ InsertTempDtorDecisionBlock(FalseContext, TrueBlock);
+ } else if (TrueContext.TerminatorExpr) {
+ Block = TrueBlock;
+ InsertTempDtorDecisionBlock(TrueContext);
} else {
- addSuccessor(Block, ConfluenceBlock);
- std::reverse(ConfluenceBlock->pred_begin(), ConfluenceBlock->pred_end());
+ InsertTempDtorDecisionBlock(FalseContext);
}
-
- if (!RHSBlock)
- RHSBlock = ConfluenceBlock;
-
- addSuccessor(Block, RHSBlock, !KnownVal.isTrue());
-
return Block;
}
@@ -3718,10 +3775,9 @@ CFGBlock *CFG::createBlock() {
return &back();
}
-/// buildCFG - Constructs a CFG from an AST. Ownership of the returned
-/// CFG is returned to the caller.
-CFG* CFG::buildCFG(const Decl *D, Stmt *Statement, ASTContext *C,
- const BuildOptions &BO) {
+/// buildCFG - Constructs a CFG from an AST.
+std::unique_ptr<CFG> CFG::buildCFG(const Decl *D, Stmt *Statement,
+ ASTContext *C, const BuildOptions &BO) {
CFGBuilder Builder(C, BO);
return Builder.buildCFG(D, Statement);
}
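
The rewritten temporary-destructor handling replaces the old confluence-block construction with TempDtorContext: destructors in a conditionally evaluated subexpression now hang off a decision block whose terminator is the CXXBindTemporaryExpr itself. A small self-contained input (not part of this patch) that exercises the new path; A and f are invented names:

    // ~A() must only run on the path where the right-hand side of && was
    // actually evaluated, so the CFG builder now emits a decision block
    // terminated by the corresponding CXXBindTemporaryExpr.
    struct A {
      A();
      ~A();
      operator bool() const;
    };

    void f(bool b) {
      (void)(b && A());   // conditionally executed temporary destructor
    }
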
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index 461ffb0900bb..1df093d85098 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -11,6 +11,7 @@ add_clang_library(clangAnalysis
CallGraph.cpp
CocoaConventions.cpp
Consumed.cpp
+ CodeInjector.cpp
Dominators.cpp
FormatString.cpp
LiveVariables.cpp
diff --git a/lib/Analysis/CallGraph.cpp b/lib/Analysis/CallGraph.cpp
index f41a96d30ea5..91a8492eaa54 100644
--- a/lib/Analysis/CallGraph.cpp
+++ b/lib/Analysis/CallGraph.cpp
@@ -110,14 +110,13 @@ CallGraph::~CallGraph() {
bool CallGraph::includeInGraph(const Decl *D) {
assert(D);
- if (!D->getBody())
+ if (!D->hasBody())
return false;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// We skip function template definitions, as their semantics is
// only determined when they are instantiated.
- if (!FD->isThisDeclarationADefinition() ||
- FD->isDependentContext())
+ if (FD->isDependentContext())
return false;
IdentifierInfo *II = FD->getIdentifier();
@@ -125,11 +124,6 @@ bool CallGraph::includeInGraph(const Decl *D) {
return false;
}
- if (const ObjCMethodDecl *ID = dyn_cast<ObjCMethodDecl>(D)) {
- if (!ID->isThisDeclarationADefinition())
- return false;
- }
-
return true;
}
@@ -152,6 +146,9 @@ CallGraphNode *CallGraph::getNode(const Decl *F) const {
}
CallGraphNode *CallGraph::getOrInsertNode(Decl *F) {
+ if (F && !isa<ObjCMethodDecl>(F))
+ F = F->getCanonicalDecl();
+
CallGraphNode *&Node = FunctionMap[F];
if (Node)
return Node;
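Two behavioral changes here: includeInGraph keys off hasBody(), so a declaration whose definition lives elsewhere in the redeclaration chain still qualifies, and getOrInsertNode canonicalizes non-Objective-C declarations so that a prototype and its definition share one node. An illustrative fragment (the function names are hypothetical):

  void work();               // prototype: the canonical declaration
  void caller() { work(); }  // the call edge attaches to the canonical node
  void work() {}             // definition: maps to the same CallGraphNode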
diff --git a/lib/Analysis/CodeInjector.cpp b/lib/Analysis/CodeInjector.cpp
new file mode 100644
index 000000000000..76bf364444d1
--- /dev/null
+++ b/lib/Analysis/CodeInjector.cpp
@@ -0,0 +1,15 @@
+//===-- CodeInjector.cpp ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/CodeInjector.h"
+
+using namespace clang;
+
+CodeInjector::CodeInjector() {}
+CodeInjector::~CodeInjector() {}
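CodeInjector is a new abstract hook that lets clients hand the analyses synthesized bodies for selected declarations. A minimal subclass sketch, assuming the accompanying header declares pure virtual getBody() overloads for FunctionDecl and ObjCMethodDecl (the header is not part of this lib/ diff):

  #include "clang/Analysis/CodeInjector.h"

  // Injects nothing; a real injector would build Stmt trees, for example from
  // an external model file.
  class NullInjector : public clang::CodeInjector {
  public:
    clang::Stmt *getBody(const clang::FunctionDecl *) override {
      return nullptr;
    }
    clang::Stmt *getBody(const clang::ObjCMethodDecl *) override {
      return nullptr;
    }
  };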
diff --git a/lib/Analysis/FormatString.cpp b/lib/Analysis/FormatString.cpp
index 851b97e5bc1d..8c663d856f6a 100644
--- a/lib/Analysis/FormatString.cpp
+++ b/lib/Analysis/FormatString.cpp
@@ -244,6 +244,8 @@ clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS,
++I;
lmKind = LengthModifier::AsInt3264;
break;
+ case 'w':
+ lmKind = LengthModifier::AsWide; ++I; break;
}
LengthModifier lm(lmPosition, lmKind);
FS.setLengthModifier(lm);
@@ -504,6 +506,8 @@ analyze_format_string::LengthModifier::toString() const {
return "a";
case AsMAllocate:
return "m";
+ case AsWide:
+ return "w";
case None:
return "";
}
@@ -550,6 +554,9 @@ const char *ConversionSpecifier::toString() const {
// GlibC specific specifiers.
case PrintErrno: return "m";
+
+ // MS specific specifiers.
+ case ZArg: return "Z";
}
return nullptr;
}
@@ -608,8 +615,21 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
return true;
// Handle most integer flags
- case LengthModifier::AsChar:
case LengthModifier::AsShort:
+ if (Target.getTriple().isOSMSVCRT()) {
+ switch (CS.getKind()) {
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::SArg:
+ case ConversionSpecifier::ZArg:
+ return true;
+ default:
+ break;
+ }
+ }
+ // Fall through.
+ case LengthModifier::AsChar:
case LengthModifier::AsLongLong:
case LengthModifier::AsQuad:
case LengthModifier::AsIntMax:
@@ -632,7 +652,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
}
// Handle 'l' flag
- case LengthModifier::AsLong:
+ case LengthModifier::AsLong: // or AsWideChar
switch (CS.getKind()) {
case ConversionSpecifier::dArg:
case ConversionSpecifier::DArg:
@@ -655,6 +675,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
case ConversionSpecifier::cArg:
case ConversionSpecifier::sArg:
case ConversionSpecifier::ScanListArg:
+ case ConversionSpecifier::ZArg:
return true;
default:
return false;
@@ -719,6 +740,17 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
default:
return false;
}
+ case LengthModifier::AsWide:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::SArg:
+ case ConversionSpecifier::ZArg:
+ return Target.getTriple().isOSMSVCRT();
+ default:
+ return false;
+ }
}
llvm_unreachable("Invalid LengthModifier Kind!");
}
@@ -741,6 +773,7 @@ bool FormatSpecifier::hasStandardLengthModifier() const {
case LengthModifier::AsInt32:
case LengthModifier::AsInt3264:
case LengthModifier::AsInt64:
+ case LengthModifier::AsWide:
return false;
}
llvm_unreachable("Invalid LengthModifier Kind!");
@@ -778,6 +811,7 @@ bool FormatSpecifier::hasStandardConversionSpecifier(const LangOptions &LangOpt)
case ConversionSpecifier::DArg:
case ConversionSpecifier::OArg:
case ConversionSpecifier::UArg:
+ case ConversionSpecifier::ZArg:
return false;
}
llvm_unreachable("Invalid ConversionSpecifier Kind!");
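These hunks teach the format-string analyzer two Microsoft extensions that are honored only when the target triple uses the MSVCRT: the 'w' length modifier (wide variants of c, C, s, S, Z) and the 'Z' conversion specifier. Hypothetical calls the checker now understands on such targets; none of this is standard C or C++:

  #include <cstdio>

  void msvcrtPrintf(const wchar_t *ws) {
    std::printf("%ws", ws);    // 'w' length modifier: wide string
    std::printf("%hs", "hi");  // 'h' with %s: explicitly narrow string
  }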
diff --git a/lib/Analysis/FormatStringParsing.h b/lib/Analysis/FormatStringParsing.h
index fba318042cb0..e1652964b8c2 100644
--- a/lib/Analysis/FormatStringParsing.h
+++ b/lib/Analysis/FormatStringParsing.h
@@ -1,5 +1,5 @@
-#ifndef LLVM_CLANG_FORMAT_PARSING_H
-#define LLVM_CLANG_FORMAT_PARSING_H
+#ifndef LLVM_CLANG_LIB_ANALYSIS_FORMATSTRINGPARSING_H
+#define LLVM_CLANG_LIB_ANALYSIS_FORMATSTRINGPARSING_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index 3d6fc039fd77..86b679cb155b 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -82,7 +82,6 @@ namespace {
class LiveVariablesImpl {
public:
AnalysisDeclContext &analysisContext;
- std::vector<LiveVariables::LivenessValues> cfgBlockValues;
llvm::ImmutableSet<const Stmt *>::Factory SSetFact;
llvm::ImmutableSet<const VarDecl *>::Factory DSetFact;
llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksEndToLiveness;
diff --git a/lib/Analysis/PrintfFormatString.cpp b/lib/Analysis/PrintfFormatString.cpp
index 082a8327a346..146635b88702 100644
--- a/lib/Analysis/PrintfFormatString.cpp
+++ b/lib/Analysis/PrintfFormatString.cpp
@@ -54,7 +54,8 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
const char *E,
unsigned &argIndex,
const LangOptions &LO,
- const TargetInfo &Target) {
+ const TargetInfo &Target,
+ bool Warn) {
using namespace clang::analyze_format_string;
using namespace clang::analyze_printf;
@@ -83,7 +84,8 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
if (I == E) {
// No more characters left?
- H.HandleIncompleteSpecifier(Start, E - Start);
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
return true;
}
@@ -93,7 +95,8 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
if (I == E) {
// No more characters left?
- H.HandleIncompleteSpecifier(Start, E - Start);
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
return true;
}
@@ -118,7 +121,8 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
if (I == E) {
// No more characters left?
- H.HandleIncompleteSpecifier(Start, E - Start);
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
return true;
}
@@ -129,7 +133,8 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
if (I == E) {
// No more characters left?
- H.HandleIncompleteSpecifier(Start, E - Start);
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
return true;
}
@@ -137,7 +142,8 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
if (*I == '.') {
++I;
if (I == E) {
- H.HandleIncompleteSpecifier(Start, E - Start);
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
return true;
}
@@ -147,7 +153,8 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
if (I == E) {
// No more characters left?
- H.HandleIncompleteSpecifier(Start, E - Start);
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
return true;
}
}
@@ -155,7 +162,8 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
// Look for the length modifier.
if (ParseLengthModifier(FS, I, E, LO) && I == E) {
// No more characters left?
- H.HandleIncompleteSpecifier(Start, E - Start);
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
return true;
}
@@ -198,7 +206,7 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
case '@': k = ConversionSpecifier::ObjCObjArg; break;
// Glibc specific.
case 'm': k = ConversionSpecifier::PrintErrno; break;
- // Apple-specific
+ // Apple-specific.
case 'D':
if (Target.getTriple().isOSDarwin())
k = ConversionSpecifier::DArg;
@@ -211,6 +219,10 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
if (Target.getTriple().isOSDarwin())
k = ConversionSpecifier::UArg;
break;
+ // MS specific.
+ case 'Z':
+ if (Target.getTriple().isOSMSVCRT())
+ k = ConversionSpecifier::ZArg;
}
PrintfConversionSpecifier CS(conversionPosition, k);
FS.setConversionSpecifier(CS);
@@ -235,7 +247,7 @@ bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
// Keep looking for a format specifier until we have exhausted the string.
while (I != E) {
const PrintfSpecifierResult &FSR = ParsePrintfSpecifier(H, I, E, argIndex,
- LO, Target);
+ LO, Target, true);
// Did a fail-stop error of any kind occur when parsing the specifier?
// If so, don't do any more processing.
if (FSR.shouldStop())
@@ -253,6 +265,34 @@ bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
return false;
}
+bool clang::analyze_format_string::ParseFormatStringHasSArg(const char *I,
+ const char *E,
+ const LangOptions &LO,
+ const TargetInfo &Target) {
+
+ unsigned argIndex = 0;
+
+ // Keep looking for a %s format specifier until we have exhausted the string.
+ FormatStringHandler H;
+ while (I != E) {
+ const PrintfSpecifierResult &FSR = ParsePrintfSpecifier(H, I, E, argIndex,
+ LO, Target, false);
+ // Did a fail-stop error of any kind occur when parsing the specifier?
+ // If so, don't do any more processing.
+ if (FSR.shouldStop())
+ return false;
+ // Did we exhaust the string or encounter an error that
+ // we can recover from?
+ if (!FSR.hasValue())
+ continue;
+ const analyze_printf::PrintfSpecifier &FS = FSR.getValue();
+ // Return true if this a %s format specifier.
+    // Return true if this is a %s format specifier.
+ if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::Kind::sArg)
+ return true;
+ }
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// Methods on PrintfSpecifier.
//===----------------------------------------------------------------------===//
@@ -266,9 +306,14 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
if (CS.getKind() == ConversionSpecifier::cArg)
switch (LM.getKind()) {
- case LengthModifier::None: return Ctx.IntTy;
+ case LengthModifier::None:
+ return Ctx.IntTy;
case LengthModifier::AsLong:
+ case LengthModifier::AsWide:
return ArgType(ArgType::WIntTy, "wint_t");
+ case LengthModifier::AsShort:
+ if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
+ return Ctx.IntTy;
default:
return ArgType::Invalid();
}
@@ -303,6 +348,7 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
return ArgType(Ctx.getPointerDiffType(), "ptrdiff_t");
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
+ case LengthModifier::AsWide:
return ArgType::Invalid();
}
@@ -337,6 +383,7 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
return ArgType();
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
+ case LengthModifier::AsWide:
return ArgType::Invalid();
}
@@ -372,6 +419,7 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
case LengthModifier::AsInt32:
case LengthModifier::AsInt3264:
case LengthModifier::AsInt64:
+ case LengthModifier::AsWide:
return ArgType::Invalid();
}
}
@@ -384,15 +432,23 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
"const unichar *");
return ArgType(ArgType::WCStrTy, "wchar_t *");
}
+ if (LM.getKind() == LengthModifier::AsWide)
+ return ArgType(ArgType::WCStrTy, "wchar_t *");
return ArgType::CStrTy;
case ConversionSpecifier::SArg:
if (IsObjCLiteral)
return ArgType(Ctx.getPointerType(Ctx.UnsignedShortTy.withConst()),
"const unichar *");
+ if (Ctx.getTargetInfo().getTriple().isOSMSVCRT() &&
+ LM.getKind() == LengthModifier::AsShort)
+ return ArgType::CStrTy;
return ArgType(ArgType::WCStrTy, "wchar_t *");
case ConversionSpecifier::CArg:
if (IsObjCLiteral)
return ArgType(Ctx.UnsignedShortTy, "unichar");
+ if (Ctx.getTargetInfo().getTriple().isOSMSVCRT() &&
+ LM.getKind() == LengthModifier::AsShort)
+ return Ctx.IntTy;
return ArgType(Ctx.WideCharTy, "wchar_t");
case ConversionSpecifier::pArg:
return ArgType::CPointerTy;
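ParsePrintfSpecifier gains a Warn flag so the new ParseFormatStringHasSArg helper can scan a literal for %s without firing incomplete-specifier callbacks. A caller sketch, assuming the matching declaration was added to FormatString.h as part of this import and that the format text, LangOptions, and TargetInfo come from the surrounding checker:

  #include "clang/Analysis/Analyses/FormatString.h"
  #include "llvm/ADT/StringRef.h"

  bool formatContainsSArg(llvm::StringRef Fmt, const clang::LangOptions &LO,
                          const clang::TargetInfo &Target) {
    // True if the literal contains a %s conversion; parsing stops quietly at
    // the first fail-stop error instead of diagnosing it.
    return clang::analyze_format_string::ParseFormatStringHasSArg(
        Fmt.begin(), Fmt.end(), LO, Target);
  }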
diff --git a/lib/Analysis/ReachableCode.cpp b/lib/Analysis/ReachableCode.cpp
index b4a72a7f8006..8165b09f4080 100644
--- a/lib/Analysis/ReachableCode.cpp
+++ b/lib/Analysis/ReachableCode.cpp
@@ -13,15 +13,15 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ReachableCode.h"
-#include "clang/Lex/Preprocessor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
-#include "clang/AST/StmtCXX.h"
#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtCXX.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
diff --git a/lib/Analysis/ScanfFormatString.cpp b/lib/Analysis/ScanfFormatString.cpp
index ed286274950b..d484d8e828cb 100644
--- a/lib/Analysis/ScanfFormatString.cpp
+++ b/lib/Analysis/ScanfFormatString.cpp
@@ -257,6 +257,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case LengthModifier::AsMAllocate:
case LengthModifier::AsInt32:
case LengthModifier::AsInt3264:
+ case LengthModifier::AsWide:
return ArgType::Invalid();
}
@@ -295,6 +296,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case LengthModifier::AsMAllocate:
case LengthModifier::AsInt32:
case LengthModifier::AsInt3264:
+ case LengthModifier::AsWide:
return ArgType::Invalid();
}
@@ -326,10 +328,14 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case LengthModifier::None:
return ArgType::PtrTo(ArgType::AnyCharTy);
case LengthModifier::AsLong:
+ case LengthModifier::AsWide:
return ArgType::PtrTo(ArgType(Ctx.getWideCharType(), "wchar_t"));
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
return ArgType::PtrTo(ArgType::CStrTy);
+ case LengthModifier::AsShort:
+ if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
+ return ArgType::PtrTo(ArgType::AnyCharTy);
default:
return ArgType::Invalid();
}
@@ -338,10 +344,14 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
// FIXME: Mac OS X specific?
switch (LM.getKind()) {
case LengthModifier::None:
+ case LengthModifier::AsWide:
return ArgType::PtrTo(ArgType(Ctx.getWideCharType(), "wchar_t"));
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
return ArgType::PtrTo(ArgType(ArgType::WCStrTy, "wchar_t *"));
+ case LengthModifier::AsShort:
+ if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
+ return ArgType::PtrTo(ArgType::AnyCharTy);
default:
return ArgType::Invalid();
}
@@ -378,6 +388,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case LengthModifier::AsMAllocate:
case LengthModifier::AsInt32:
case LengthModifier::AsInt3264:
+ case LengthModifier::AsWide:
return ArgType::Invalid();
}
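The matching scanf-side changes: the 'w' modifier maps %ws (and the other c/C/s/S forms) to wide destinations, and on MSVCRT targets 'h' with %s or %c keeps the destination narrow. Hypothetical calls the checker now type-checks accordingly (the buffers and widths are illustrative only):

  #include <cstdio>

  void msvcrtScanf() {
    wchar_t wbuf[32];
    char nbuf[32];
    std::scanf("%31ws", wbuf);  // destination is wchar_t[] under 'w'
    std::scanf("%31hs", nbuf);  // destination stays char[] under 'h'
  }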
diff --git a/lib/Analysis/ThreadSafety.cpp b/lib/Analysis/ThreadSafety.cpp
index 11df61f80fa0..a986c587f869 100644
--- a/lib/Analysis/ThreadSafety.cpp
+++ b/lib/Analysis/ThreadSafety.cpp
@@ -22,10 +22,10 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafety.h"
+#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyLogical.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
-#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
@@ -40,762 +40,111 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <ostream>
+#include <sstream>
#include <utility>
#include <vector>
-using namespace clang;
-using namespace thread_safety;
+
+namespace clang {
+namespace threadSafety {
// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() {}
-namespace {
-
-/// SExpr implements a simple expression language that is used to store,
-/// compare, and pretty-print C++ expressions. Unlike a clang Expr, a SExpr
-/// does not capture surface syntax, and it does not distinguish between
-/// C++ concepts, like pointers and references, that have no real semantic
-/// differences. This simplicity allows SExprs to be meaningfully compared,
-/// e.g.
-/// (x) = x
-/// (*this).foo = this->foo
-/// *&a = a
-///
-/// Thread-safety analysis works by comparing lock expressions. Within the
-/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
-/// a particular mutex object at run-time. Subsequent occurrences of the same
-/// expression (where "same" means syntactic equality) will refer to the same
-/// run-time object if three conditions hold:
-/// (1) Local variables in the expression, such as "x" have not changed.
-/// (2) Values on the heap that affect the expression have not changed.
-/// (3) The expression involves only pure function calls.
-///
-/// The current implementation assumes, but does not verify, that multiple uses
-/// of the same lock expression satisfies these criteria.
-class SExpr {
-private:
- enum ExprOp {
- EOP_Nop, ///< No-op
- EOP_Wildcard, ///< Matches anything.
- EOP_Universal, ///< Universal lock.
- EOP_This, ///< This keyword.
- EOP_NVar, ///< Named variable.
- EOP_LVar, ///< Local variable.
- EOP_Dot, ///< Field access
- EOP_Call, ///< Function call
- EOP_MCall, ///< Method call
- EOP_Index, ///< Array index
- EOP_Unary, ///< Unary operation
- EOP_Binary, ///< Binary operation
- EOP_Unknown ///< Catchall for everything else
- };
-
-
- class SExprNode {
- private:
- unsigned char Op; ///< Opcode of the root node
- unsigned char Flags; ///< Additional opcode-specific data
- unsigned short Sz; ///< Number of child nodes
- const void* Data; ///< Additional opcode-specific data
-
- public:
- SExprNode(ExprOp O, unsigned F, const void* D)
- : Op(static_cast<unsigned char>(O)),
- Flags(static_cast<unsigned char>(F)), Sz(1), Data(D)
- { }
-
- unsigned size() const { return Sz; }
- void setSize(unsigned S) { Sz = S; }
-
- ExprOp kind() const { return static_cast<ExprOp>(Op); }
-
- const NamedDecl* getNamedDecl() const {
- assert(Op == EOP_NVar || Op == EOP_LVar || Op == EOP_Dot);
- return reinterpret_cast<const NamedDecl*>(Data);
- }
-
- const NamedDecl* getFunctionDecl() const {
- assert(Op == EOP_Call || Op == EOP_MCall);
- return reinterpret_cast<const NamedDecl*>(Data);
- }
-
- bool isArrow() const { return Op == EOP_Dot && Flags == 1; }
- void setArrow(bool A) { Flags = A ? 1 : 0; }
-
- unsigned arity() const {
- switch (Op) {
- case EOP_Nop: return 0;
- case EOP_Wildcard: return 0;
- case EOP_Universal: return 0;
- case EOP_NVar: return 0;
- case EOP_LVar: return 0;
- case EOP_This: return 0;
- case EOP_Dot: return 1;
- case EOP_Call: return Flags+1; // First arg is function.
- case EOP_MCall: return Flags+1; // First arg is implicit obj.
- case EOP_Index: return 2;
- case EOP_Unary: return 1;
- case EOP_Binary: return 2;
- case EOP_Unknown: return Flags;
- }
- return 0;
- }
-
- bool operator==(const SExprNode& Other) const {
- // Ignore flags and size -- they don't matter.
- return (Op == Other.Op &&
- Data == Other.Data);
- }
-
- bool operator!=(const SExprNode& Other) const {
- return !(*this == Other);
- }
-
- bool matches(const SExprNode& Other) const {
- return (*this == Other) ||
- (Op == EOP_Wildcard) ||
- (Other.Op == EOP_Wildcard);
- }
- };
-
-
- /// \brief Encapsulates the lexical context of a function call. The lexical
- /// context includes the arguments to the call, including the implicit object
- /// argument. When an attribute containing a mutex expression is attached to
- /// a method, the expression may refer to formal parameters of the method.
- /// Actual arguments must be substituted for formal parameters to derive
- /// the appropriate mutex expression in the lexical context where the function
- /// is called. PrevCtx holds the context in which the arguments themselves
- /// should be evaluated; multiple calling contexts can be chained together
- /// by the lock_returned attribute.
- struct CallingContext {
- const NamedDecl* AttrDecl; // The decl to which the attribute is attached.
- const Expr* SelfArg; // Implicit object argument -- e.g. 'this'
- bool SelfArrow; // is Self referred to with -> or .?
- unsigned NumArgs; // Number of funArgs
- const Expr* const* FunArgs; // Function arguments
- CallingContext* PrevCtx; // The previous context; or 0 if none.
-
- CallingContext(const NamedDecl *D)
- : AttrDecl(D), SelfArg(nullptr), SelfArrow(false), NumArgs(0),
- FunArgs(nullptr), PrevCtx(nullptr) {}
- };
-
- typedef SmallVector<SExprNode, 4> NodeVector;
-
-private:
- // A SExpr is a list of SExprNodes in prefix order. The Size field allows
- // the list to be traversed as a tree.
- NodeVector NodeVec;
-
-private:
- unsigned make(ExprOp O, unsigned F = 0, const void *D = nullptr) {
- NodeVec.push_back(SExprNode(O, F, D));
- return NodeVec.size() - 1;
- }
-
- unsigned makeNop() {
- return make(EOP_Nop);
- }
-
- unsigned makeWildcard() {
- return make(EOP_Wildcard);
- }
-
- unsigned makeUniversal() {
- return make(EOP_Universal);
- }
-
- unsigned makeNamedVar(const NamedDecl *D) {
- return make(EOP_NVar, 0, D);
- }
-
- unsigned makeLocalVar(const NamedDecl *D) {
- return make(EOP_LVar, 0, D);
- }
-
- unsigned makeThis() {
- return make(EOP_This);
- }
-
- unsigned makeDot(const NamedDecl *D, bool Arrow) {
- return make(EOP_Dot, Arrow ? 1 : 0, D);
- }
-
- unsigned makeCall(unsigned NumArgs, const NamedDecl *D) {
- return make(EOP_Call, NumArgs, D);
- }
-
- // Grab the very first declaration of virtual method D
- const CXXMethodDecl* getFirstVirtualDecl(const CXXMethodDecl *D) {
- while (true) {
- D = D->getCanonicalDecl();
- CXXMethodDecl::method_iterator I = D->begin_overridden_methods(),
- E = D->end_overridden_methods();
- if (I == E)
- return D; // Method does not override anything
- D = *I; // FIXME: this does not work with multiple inheritance.
- }
- return nullptr;
- }
-
- unsigned makeMCall(unsigned NumArgs, const CXXMethodDecl *D) {
- return make(EOP_MCall, NumArgs, getFirstVirtualDecl(D));
- }
-
- unsigned makeIndex() {
- return make(EOP_Index);
- }
-
- unsigned makeUnary() {
- return make(EOP_Unary);
- }
-
- unsigned makeBinary() {
- return make(EOP_Binary);
- }
-
- unsigned makeUnknown(unsigned Arity) {
- return make(EOP_Unknown, Arity);
- }
-
- inline bool isCalleeArrow(const Expr *E) {
- const MemberExpr *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts());
- return ME ? ME->isArrow() : false;
- }
-
- /// Build an SExpr from the given C++ expression.
- /// Recursive function that terminates on DeclRefExpr.
- /// Note: this function merely creates a SExpr; it does not check to
- /// ensure that the original expression is a valid mutex expression.
- ///
- /// NDeref returns the number of Derefence and AddressOf operations
- /// preceding the Expr; this is used to decide whether to pretty-print
- /// SExprs with . or ->.
- unsigned buildSExpr(const Expr *Exp, CallingContext *CallCtx,
- int *NDeref = nullptr) {
- if (!Exp)
- return 0;
-
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
- const NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
- const ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(ND);
- if (PV) {
- const FunctionDecl *FD =
- cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
- unsigned i = PV->getFunctionScopeIndex();
+class TILPrinter :
+ public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
- if (CallCtx && CallCtx->FunArgs &&
- FD == CallCtx->AttrDecl->getCanonicalDecl()) {
- // Substitute call arguments for references to function parameters
- assert(i < CallCtx->NumArgs);
- return buildSExpr(CallCtx->FunArgs[i], CallCtx->PrevCtx, NDeref);
- }
- // Map the param back to the param of the original function declaration.
- makeNamedVar(FD->getParamDecl(i));
- return 1;
- }
- // Not a function parameter -- just store the reference.
- makeNamedVar(ND);
- return 1;
- } else if (isa<CXXThisExpr>(Exp)) {
- // Substitute parent for 'this'
- if (CallCtx && CallCtx->SelfArg) {
- if (!CallCtx->SelfArrow && NDeref)
- // 'this' is a pointer, but self is not, so need to take address.
- --(*NDeref);
- return buildSExpr(CallCtx->SelfArg, CallCtx->PrevCtx, NDeref);
- }
- else {
- makeThis();
- return 1;
- }
- } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
- const NamedDecl *ND = ME->getMemberDecl();
- int ImplicitDeref = ME->isArrow() ? 1 : 0;
- unsigned Root = makeDot(ND, false);
- unsigned Sz = buildSExpr(ME->getBase(), CallCtx, &ImplicitDeref);
- NodeVec[Root].setArrow(ImplicitDeref > 0);
- NodeVec[Root].setSize(Sz + 1);
- return Sz + 1;
- } else if (const CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) {
- // When calling a function with a lock_returned attribute, replace
- // the function call with the expression in lock_returned.
- const CXXMethodDecl *MD = CMCE->getMethodDecl()->getMostRecentDecl();
- if (LockReturnedAttr* At = MD->getAttr<LockReturnedAttr>()) {
- CallingContext LRCallCtx(CMCE->getMethodDecl());
- LRCallCtx.SelfArg = CMCE->getImplicitObjectArgument();
- LRCallCtx.SelfArrow = isCalleeArrow(CMCE->getCallee());
- LRCallCtx.NumArgs = CMCE->getNumArgs();
- LRCallCtx.FunArgs = CMCE->getArgs();
- LRCallCtx.PrevCtx = CallCtx;
- return buildSExpr(At->getArg(), &LRCallCtx);
- }
- // Hack to treat smart pointers and iterators as pointers;
- // ignore any method named get().
- if (CMCE->getMethodDecl()->getNameAsString() == "get" &&
- CMCE->getNumArgs() == 0) {
- if (NDeref && isCalleeArrow(CMCE->getCallee()))
- ++(*NDeref);
- return buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx, NDeref);
- }
- unsigned NumCallArgs = CMCE->getNumArgs();
- unsigned Root = makeMCall(NumCallArgs, CMCE->getMethodDecl());
- unsigned Sz = buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx);
- const Expr* const* CallArgs = CMCE->getArgs();
- for (unsigned i = 0; i < NumCallArgs; ++i) {
- Sz += buildSExpr(CallArgs[i], CallCtx);
- }
- NodeVec[Root].setSize(Sz + 1);
- return Sz + 1;
- } else if (const CallExpr *CE = dyn_cast<CallExpr>(Exp)) {
- const FunctionDecl *FD = CE->getDirectCallee()->getMostRecentDecl();
- if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) {
- CallingContext LRCallCtx(CE->getDirectCallee());
- LRCallCtx.NumArgs = CE->getNumArgs();
- LRCallCtx.FunArgs = CE->getArgs();
- LRCallCtx.PrevCtx = CallCtx;
- return buildSExpr(At->getArg(), &LRCallCtx);
- }
- // Treat smart pointers and iterators as pointers;
- // ignore the * and -> operators.
- if (const CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(CE)) {
- OverloadedOperatorKind k = OE->getOperator();
- if (k == OO_Star) {
- if (NDeref) ++(*NDeref);
- return buildSExpr(OE->getArg(0), CallCtx, NDeref);
- }
- else if (k == OO_Arrow) {
- return buildSExpr(OE->getArg(0), CallCtx, NDeref);
- }
- }
- unsigned NumCallArgs = CE->getNumArgs();
- unsigned Root = makeCall(NumCallArgs, nullptr);
- unsigned Sz = buildSExpr(CE->getCallee(), CallCtx);
- const Expr* const* CallArgs = CE->getArgs();
- for (unsigned i = 0; i < NumCallArgs; ++i) {
- Sz += buildSExpr(CallArgs[i], CallCtx);
- }
- NodeVec[Root].setSize(Sz+1);
- return Sz+1;
- } else if (const BinaryOperator *BOE = dyn_cast<BinaryOperator>(Exp)) {
- unsigned Root = makeBinary();
- unsigned Sz = buildSExpr(BOE->getLHS(), CallCtx);
- Sz += buildSExpr(BOE->getRHS(), CallCtx);
- NodeVec[Root].setSize(Sz);
- return Sz;
- } else if (const UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp)) {
- // Ignore & and * operators -- they're no-ops.
- // However, we try to figure out whether the expression is a pointer,
- // so we can use . and -> appropriately in error messages.
- if (UOE->getOpcode() == UO_Deref) {
- if (NDeref) ++(*NDeref);
- return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
- }
- if (UOE->getOpcode() == UO_AddrOf) {
- if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UOE->getSubExpr())) {
- if (DRE->getDecl()->isCXXInstanceMember()) {
- // This is a pointer-to-member expression, e.g. &MyClass::mu_.
- // We interpret this syntax specially, as a wildcard.
- unsigned Root = makeDot(DRE->getDecl(), false);
- makeWildcard();
- NodeVec[Root].setSize(2);
- return 2;
- }
- }
- if (NDeref) --(*NDeref);
- return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
- }
- unsigned Root = makeUnary();
- unsigned Sz = buildSExpr(UOE->getSubExpr(), CallCtx);
- NodeVec[Root].setSize(Sz);
- return Sz;
- } else if (const ArraySubscriptExpr *ASE =
- dyn_cast<ArraySubscriptExpr>(Exp)) {
- unsigned Root = makeIndex();
- unsigned Sz = buildSExpr(ASE->getBase(), CallCtx);
- Sz += buildSExpr(ASE->getIdx(), CallCtx);
- NodeVec[Root].setSize(Sz);
- return Sz;
- } else if (const AbstractConditionalOperator *CE =
- dyn_cast<AbstractConditionalOperator>(Exp)) {
- unsigned Root = makeUnknown(3);
- unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
- Sz += buildSExpr(CE->getTrueExpr(), CallCtx);
- Sz += buildSExpr(CE->getFalseExpr(), CallCtx);
- NodeVec[Root].setSize(Sz);
- return Sz;
- } else if (const ChooseExpr *CE = dyn_cast<ChooseExpr>(Exp)) {
- unsigned Root = makeUnknown(3);
- unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
- Sz += buildSExpr(CE->getLHS(), CallCtx);
- Sz += buildSExpr(CE->getRHS(), CallCtx);
- NodeVec[Root].setSize(Sz);
- return Sz;
- } else if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
- return buildSExpr(CE->getSubExpr(), CallCtx, NDeref);
- } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
- return buildSExpr(PE->getSubExpr(), CallCtx, NDeref);
- } else if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Exp)) {
- return buildSExpr(EWC->getSubExpr(), CallCtx, NDeref);
- } else if (const CXXBindTemporaryExpr *E = dyn_cast<CXXBindTemporaryExpr>(Exp)) {
- return buildSExpr(E->getSubExpr(), CallCtx, NDeref);
- } else if (isa<CharacterLiteral>(Exp) ||
- isa<CXXNullPtrLiteralExpr>(Exp) ||
- isa<GNUNullExpr>(Exp) ||
- isa<CXXBoolLiteralExpr>(Exp) ||
- isa<FloatingLiteral>(Exp) ||
- isa<ImaginaryLiteral>(Exp) ||
- isa<IntegerLiteral>(Exp) ||
- isa<StringLiteral>(Exp) ||
- isa<ObjCStringLiteral>(Exp)) {
- makeNop();
- return 1; // FIXME: Ignore literals for now
- } else {
- makeNop();
- return 1; // Ignore. FIXME: mark as invalid expression?
- }
- }
-
- /// \brief Construct a SExpr from an expression.
- /// \param MutexExp The original mutex expression within an attribute
- /// \param DeclExp An expression involving the Decl on which the attribute
- /// occurs.
- /// \param D The declaration to which the lock/unlock attribute is attached.
- void buildSExprFromExpr(const Expr *MutexExp, const Expr *DeclExp,
- const NamedDecl *D, VarDecl *SelfDecl = nullptr) {
- CallingContext CallCtx(D);
-
- if (MutexExp) {
- if (const StringLiteral* SLit = dyn_cast<StringLiteral>(MutexExp)) {
- if (SLit->getString() == StringRef("*"))
- // The "*" expr is a universal lock, which essentially turns off
- // checks until it is removed from the lockset.
- makeUniversal();
- else
- // Ignore other string literals for now.
- makeNop();
- return;
- }
- }
-
- // If we are processing a raw attribute expression, with no substitutions.
- if (!DeclExp) {
- buildSExpr(MutexExp, nullptr);
- return;
- }
- // Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
- // for formal parameters when we call buildMutexID later.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
- CallCtx.SelfArg = ME->getBase();
- CallCtx.SelfArrow = ME->isArrow();
- } else if (const CXXMemberCallExpr *CE =
- dyn_cast<CXXMemberCallExpr>(DeclExp)) {
- CallCtx.SelfArg = CE->getImplicitObjectArgument();
- CallCtx.SelfArrow = isCalleeArrow(CE->getCallee());
- CallCtx.NumArgs = CE->getNumArgs();
- CallCtx.FunArgs = CE->getArgs();
- } else if (const CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
- CallCtx.NumArgs = CE->getNumArgs();
- CallCtx.FunArgs = CE->getArgs();
- } else if (const CXXConstructExpr *CE =
- dyn_cast<CXXConstructExpr>(DeclExp)) {
- CallCtx.SelfArg = nullptr; // Will be set below
- CallCtx.NumArgs = CE->getNumArgs();
- CallCtx.FunArgs = CE->getArgs();
- } else if (D && isa<CXXDestructorDecl>(D)) {
- // There's no such thing as a "destructor call" in the AST.
- CallCtx.SelfArg = DeclExp;
- }
+/// Issue a warning about an invalid lock expression
+static void warnInvalidLock(ThreadSafetyHandler &Handler,
+ const Expr *MutexExp, const NamedDecl *D,
+ const Expr *DeclExp, StringRef Kind) {
+ SourceLocation Loc;
+ if (DeclExp)
+ Loc = DeclExp->getExprLoc();
- // Hack to handle constructors, where self cannot be recovered from
- // the expression.
- if (SelfDecl && !CallCtx.SelfArg) {
- DeclRefExpr SelfDRE(SelfDecl, false, SelfDecl->getType(), VK_LValue,
- SelfDecl->getLocation());
- CallCtx.SelfArg = &SelfDRE;
-
- // If the attribute has no arguments, then assume the argument is "this".
- if (!MutexExp)
- buildSExpr(CallCtx.SelfArg, nullptr);
- else // For most attributes.
- buildSExpr(MutexExp, &CallCtx);
- return;
- }
-
- // If the attribute has no arguments, then assume the argument is "this".
- if (!MutexExp)
- buildSExpr(CallCtx.SelfArg, nullptr);
- else // For most attributes.
- buildSExpr(MutexExp, &CallCtx);
- }
-
- /// \brief Get index of next sibling of node i.
- unsigned getNextSibling(unsigned i) const {
- return i + NodeVec[i].size();
- }
-
-public:
- explicit SExpr(clang::Decl::EmptyShell e) { NodeVec.clear(); }
-
- /// \param MutexExp The original mutex expression within an attribute
- /// \param DeclExp An expression involving the Decl on which the attribute
- /// occurs.
- /// \param D The declaration to which the lock/unlock attribute is attached.
- /// Caller must check isValid() after construction.
- SExpr(const Expr *MutexExp, const Expr *DeclExp, const NamedDecl *D,
- VarDecl *SelfDecl = nullptr) {
- buildSExprFromExpr(MutexExp, DeclExp, D, SelfDecl);
- }
-
- /// Return true if this is a valid decl sequence.
- /// Caller must call this by hand after construction to handle errors.
- bool isValid() const {
- return !NodeVec.empty();
- }
-
- bool shouldIgnore() const {
- // Nop is a mutex that we have decided to deliberately ignore.
- assert(NodeVec.size() > 0 && "Invalid Mutex");
- return NodeVec[0].kind() == EOP_Nop;
- }
-
- bool isUniversal() const {
- assert(NodeVec.size() > 0 && "Invalid Mutex");
- return NodeVec[0].kind() == EOP_Universal;
- }
-
- /// Issue a warning about an invalid lock expression
- static void warnInvalidLock(ThreadSafetyHandler &Handler,
- const Expr *MutexExp, const Expr *DeclExp,
- const NamedDecl *D, StringRef Kind) {
- SourceLocation Loc;
- if (DeclExp)
- Loc = DeclExp->getExprLoc();
-
- // FIXME: add a note about the attribute location in MutexExp or D
- if (Loc.isValid())
- Handler.handleInvalidLockExp(Kind, Loc);
- }
-
- bool operator==(const SExpr &other) const {
- return NodeVec == other.NodeVec;
- }
-
- bool operator!=(const SExpr &other) const {
- return !(*this == other);
- }
-
- bool matches(const SExpr &Other, unsigned i = 0, unsigned j = 0) const {
- if (NodeVec[i].matches(Other.NodeVec[j])) {
- unsigned ni = NodeVec[i].arity();
- unsigned nj = Other.NodeVec[j].arity();
- unsigned n = (ni < nj) ? ni : nj;
- bool Result = true;
- unsigned ci = i+1; // first child of i
- unsigned cj = j+1; // first child of j
- for (unsigned k = 0; k < n;
- ++k, ci=getNextSibling(ci), cj = Other.getNextSibling(cj)) {
- Result = Result && matches(Other, ci, cj);
- }
- return Result;
- }
- return false;
- }
-
- // A partial match between a.mu and b.mu returns true a and b have the same
- // type (and thus mu refers to the same mutex declaration), regardless of
- // whether a and b are different objects or not.
- bool partiallyMatches(const SExpr &Other) const {
- if (NodeVec[0].kind() == EOP_Dot)
- return NodeVec[0].matches(Other.NodeVec[0]);
- return false;
- }
-
- /// \brief Pretty print a lock expression for use in error messages.
- std::string toString(unsigned i = 0) const {
- assert(isValid());
- if (i >= NodeVec.size())
- return "";
-
- const SExprNode* N = &NodeVec[i];
- switch (N->kind()) {
- case EOP_Nop:
- return "_";
- case EOP_Wildcard:
- return "(?)";
- case EOP_Universal:
- return "*";
- case EOP_This:
- return "this";
- case EOP_NVar:
- case EOP_LVar: {
- return N->getNamedDecl()->getNameAsString();
- }
- case EOP_Dot: {
- if (NodeVec[i+1].kind() == EOP_Wildcard) {
- std::string S = "&";
- S += N->getNamedDecl()->getQualifiedNameAsString();
- return S;
- }
- std::string FieldName = N->getNamedDecl()->getNameAsString();
- if (NodeVec[i+1].kind() == EOP_This)
- return FieldName;
+ // FIXME: add a note about the attribute location in MutexExp or D
+ if (Loc.isValid())
+ Handler.handleInvalidLockExp(Kind, Loc);
+}
- std::string S = toString(i+1);
- if (N->isArrow())
- return S + "->" + FieldName;
- else
- return S + "." + FieldName;
- }
- case EOP_Call: {
- std::string S = toString(i+1) + "(";
- unsigned NumArgs = N->arity()-1;
- unsigned ci = getNextSibling(i+1);
- for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
- S += toString(ci);
- if (k+1 < NumArgs) S += ",";
- }
- S += ")";
- return S;
- }
- case EOP_MCall: {
- std::string S = "";
- if (NodeVec[i+1].kind() != EOP_This)
- S = toString(i+1) + ".";
- if (const NamedDecl *D = N->getFunctionDecl())
- S += D->getNameAsString() + "(";
- else
- S += "#(";
- unsigned NumArgs = N->arity()-1;
- unsigned ci = getNextSibling(i+1);
- for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
- S += toString(ci);
- if (k+1 < NumArgs) S += ",";
- }
- S += ")";
- return S;
- }
- case EOP_Index: {
- std::string S1 = toString(i+1);
- std::string S2 = toString(i+1 + NodeVec[i+1].size());
- return S1 + "[" + S2 + "]";
- }
- case EOP_Unary: {
- std::string S = toString(i+1);
- return "#" + S;
- }
- case EOP_Binary: {
- std::string S1 = toString(i+1);
- std::string S2 = toString(i+1 + NodeVec[i+1].size());
- return "(" + S1 + "#" + S2 + ")";
- }
- case EOP_Unknown: {
- unsigned NumChildren = N->arity();
- if (NumChildren == 0)
- return "(...)";
- std::string S = "(";
- unsigned ci = i+1;
- for (unsigned j = 0; j < NumChildren; ++j, ci = getNextSibling(ci)) {
- S += toString(ci);
- if (j+1 < NumChildren) S += "#";
- }
- S += ")";
- return S;
- }
- }
- return "";
- }
-};
-/// \brief A short list of SExprs
-class MutexIDList : public SmallVector<SExpr, 3> {
+/// \brief A set of CapabilityInfo objects, which are compiled from the
+/// requires attributes on a function.
+class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
/// \brief Push M onto list, but discard duplicates.
- void push_back_nodup(const SExpr& M) {
- if (end() == std::find(begin(), end(), M))
- push_back(M);
+ void push_back_nodup(const CapabilityExpr &CapE) {
+ iterator It = std::find_if(begin(), end(),
+ [=](const CapabilityExpr &CapE2) {
+ return CapE.equals(CapE2);
+ });
+ if (It == end())
+ push_back(CapE);
}
};
-/// \brief This is a helper class that stores info about the most recent
-/// accquire of a Lock.
-///
-/// The main body of the analysis maps MutexIDs to LockDatas.
-struct LockData {
- SourceLocation AcquireLoc;
-
- /// \brief LKind stores whether a lock is held shared or exclusively.
- /// Note that this analysis does not currently support either re-entrant
- /// locking or lock "upgrading" and "downgrading" between exclusive and
- /// shared.
- ///
- /// FIXME: add support for re-entrant locking and lock up/downgrading
- LockKind LKind;
- bool Asserted; // for asserted locks
- bool Managed; // for ScopedLockable objects
- SExpr UnderlyingMutex; // for ScopedLockable objects
-
- LockData(SourceLocation AcquireLoc, LockKind LKind, bool M=false,
- bool Asrt=false)
- : AcquireLoc(AcquireLoc), LKind(LKind), Asserted(Asrt), Managed(M),
- UnderlyingMutex(Decl::EmptyShell())
- {}
+class FactManager;
+class FactSet;
- LockData(SourceLocation AcquireLoc, LockKind LKind, const SExpr &Mu)
- : AcquireLoc(AcquireLoc), LKind(LKind), Asserted(false), Managed(false),
- UnderlyingMutex(Mu)
- {}
-
- bool operator==(const LockData &other) const {
- return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
- }
-
- bool operator!=(const LockData &other) const {
- return !(*this == other);
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(AcquireLoc.getRawEncoding());
- ID.AddInteger(LKind);
- }
+/// \brief This is a helper class that stores a fact that is known at a
+/// particular point in program execution. Currently, a fact is a capability,
+/// along with additional information, such as where it was acquired, whether
+/// it is exclusive or shared, etc.
+///
+/// FIXME: this analysis does not currently support either re-entrant
+/// locking or lock "upgrading" and "downgrading" between exclusive and
+/// shared.
+class FactEntry : public CapabilityExpr {
+private:
+ LockKind LKind; ///< exclusive or shared
+ SourceLocation AcquireLoc; ///< where it was acquired.
+ bool Asserted; ///< true if the lock was asserted
+public:
+ FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
+ bool Asrt)
+ : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt) {}
+
+ virtual ~FactEntry() {}
+
+ LockKind kind() const { return LKind; }
+ SourceLocation loc() const { return AcquireLoc; }
+ bool asserted() const { return Asserted; }
+
+ virtual void
+ handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+ SourceLocation JoinLoc, LockErrorKind LEK,
+ ThreadSafetyHandler &Handler) const = 0;
+ virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
+ const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+ bool FullyRemove, ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const = 0;
+
+ // Return true if LKind >= LK, where exclusive > shared
bool isAtLeast(LockKind LK) {
- return (LK == LK_Shared) || (LKind == LK_Exclusive);
+ return (LKind == LK_Exclusive) || (LK == LK_Shared);
}
};
-/// \brief A FactEntry stores a single fact that is known at a particular point
-/// in the program execution. Currently, this is information regarding a lock
-/// that is held at that point.
-struct FactEntry {
- SExpr MutID;
- LockData LDat;
-
- FactEntry(const SExpr& M, const LockData& L)
- : MutID(M), LDat(L)
- { }
-};
-
-
typedef unsigned short FactID;
/// \brief FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
- std::vector<FactEntry> Facts;
+ std::vector<std::unique_ptr<FactEntry>> Facts;
public:
- FactID newLock(const SExpr& M, const LockData& L) {
- Facts.push_back(FactEntry(M,L));
+ FactID newFact(std::unique_ptr<FactEntry> Entry) {
+ Facts.push_back(std::move(Entry));
return static_cast<unsigned short>(Facts.size() - 1);
}
- const FactEntry& operator[](FactID F) const { return Facts[F]; }
- FactEntry& operator[](FactID F) { return Facts[F]; }
+ const FactEntry &operator[](FactID F) const { return *Facts[F]; }
+ FactEntry &operator[](FactID F) { return *Facts[F]; }
};
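The rewrite drops the file-local SExpr machinery in favor of CapabilityExpr facts built by the new ThreadSafetyCommon/TIL layer; a FactEntry is now a polymorphic capability plus acquisition information. One thing the new representation can express is a negative capability, i.e. knowledge that a lock is not held, which later hunks check and propagate. An annotated sketch of the kind of user code these facts model (the Mutex type and attribute spellings are illustrative, not taken from this patch):

  struct __attribute__((capability("mutex"))) Mutex {
    void Lock()   __attribute__((acquire_capability()));
    void Unlock() __attribute__((release_capability()));
  };

  Mutex mu;
  int data __attribute__((guarded_by(mu)));

  // Requiring '!mu' says the caller must NOT already hold mu, so Lock() below
  // cannot double-acquire; after Unlock() the negative fact !mu holds again.
  void update() __attribute__((requires_capability(!mu))) {
    mu.Lock();
    data = 1;
    mu.Unlock();
  }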
@@ -824,68 +173,73 @@ public:
bool isEmpty() const { return FactIDs.size() == 0; }
- FactID addLock(FactManager& FM, const SExpr& M, const LockData& L) {
- FactID F = FM.newLock(M, L);
+ // Return true if the set contains only negative facts
+ bool isEmpty(FactManager &FactMan) const {
+ for (FactID FID : *this) {
+ if (!FactMan[FID].negative())
+ return false;
+ }
+ return true;
+ }
+
+ void addLockByID(FactID ID) { FactIDs.push_back(ID); }
+
+ FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
+ FactID F = FM.newFact(std::move(Entry));
FactIDs.push_back(F);
return F;
}
- bool removeLock(FactManager& FM, const SExpr& M) {
+ bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
unsigned n = FactIDs.size();
if (n == 0)
return false;
for (unsigned i = 0; i < n-1; ++i) {
- if (FM[FactIDs[i]].MutID.matches(M)) {
+ if (FM[FactIDs[i]].matches(CapE)) {
FactIDs[i] = FactIDs[n-1];
FactIDs.pop_back();
return true;
}
}
- if (FM[FactIDs[n-1]].MutID.matches(M)) {
+ if (FM[FactIDs[n-1]].matches(CapE)) {
FactIDs.pop_back();
return true;
}
return false;
}
- iterator findLockIter(FactManager &FM, const SExpr &M) {
+ iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
return std::find_if(begin(), end(), [&](FactID ID) {
- return FM[ID].MutID.matches(M);
+ return FM[ID].matches(CapE);
});
}
- LockData *findLock(FactManager &FM, const SExpr &M) const {
+ FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
auto I = std::find_if(begin(), end(), [&](FactID ID) {
- return FM[ID].MutID.matches(M);
+ return FM[ID].matches(CapE);
});
-
- return I != end() ? &FM[*I].LDat : nullptr;
+ return I != end() ? &FM[*I] : nullptr;
}
- LockData *findLockUniv(FactManager &FM, const SExpr &M) const {
+ FactEntry *findLockUniv(FactManager &FM, const CapabilityExpr &CapE) const {
auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
- const SExpr &Expr = FM[ID].MutID;
- return Expr.isUniversal() || Expr.matches(M);
+ return FM[ID].matchesUniv(CapE);
});
-
- return I != end() ? &FM[*I].LDat : nullptr;
+ return I != end() ? &FM[*I] : nullptr;
}
- FactEntry *findPartialMatch(FactManager &FM, const SExpr &M) const {
+ FactEntry *findPartialMatch(FactManager &FM,
+ const CapabilityExpr &CapE) const {
auto I = std::find_if(begin(), end(), [&](FactID ID) {
- return FM[ID].MutID.partiallyMatches(M);
+ return FM[ID].partiallyMatches(CapE);
});
-
return I != end() ? &FM[*I] : nullptr;
}
};
-/// A Lockset maps each SExpr (defined above) to information about how it has
-/// been locked.
-typedef llvm::ImmutableMap<SExpr, LockData> Lockset;
-typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;
+typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;
class LocalVariableMap;
/// A side (entry or exit) of a CFG node.
@@ -1404,29 +758,130 @@ static void findBlockLocations(CFG *CFGraph,
}
}
+class LockableFactEntry : public FactEntry {
+private:
+ bool Managed; ///< managed by ScopedLockable object
+
+public:
+ LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
+ bool Mng = false, bool Asrt = false)
+ : FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {}
+
+ void
+ handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+ SourceLocation JoinLoc, LockErrorKind LEK,
+ ThreadSafetyHandler &Handler) const override {
+ if (!Managed && !asserted() && !negative() && !isUniversal()) {
+ Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
+ LEK);
+ }
+ }
+
+ void handleUnlock(FactSet &FSet, FactManager &FactMan,
+ const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+ bool FullyRemove, ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const override {
+ FSet.removeLock(FactMan, Cp);
+ if (!Cp.negative()) {
+ FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+ !Cp, LK_Exclusive, UnlockLoc));
+ }
+ }
+};
+
+class ScopedLockableFactEntry : public FactEntry {
+private:
+ SmallVector<const til::SExpr *, 4> UnderlyingMutexes;
+
+public:
+ ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
+ const CapExprSet &Excl, const CapExprSet &Shrd)
+ : FactEntry(CE, LK_Exclusive, Loc, false) {
+ for (const auto &M : Excl)
+ UnderlyingMutexes.push_back(M.sexpr());
+ for (const auto &M : Shrd)
+ UnderlyingMutexes.push_back(M.sexpr());
+ }
+
+ void
+ handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+ SourceLocation JoinLoc, LockErrorKind LEK,
+ ThreadSafetyHandler &Handler) const override {
+ for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+ if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
+ // If this scoped lock manages another mutex, and if the underlying
+ // mutex is still held, then warn about the underlying mutex.
+ Handler.handleMutexHeldEndOfScope(
+ "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
+ }
+ }
+ }
+
+ void handleUnlock(FactSet &FSet, FactManager &FactMan,
+ const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+ bool FullyRemove, ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const override {
+ assert(!Cp.negative() && "Managing object cannot be negative.");
+ for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+ CapabilityExpr UnderCp(UnderlyingMutex, false);
+ auto UnderEntry = llvm::make_unique<LockableFactEntry>(
+ !UnderCp, LK_Exclusive, UnlockLoc);
+
+ if (FullyRemove) {
+ // We're destroying the managing object.
+ // Remove the underlying mutex if it exists; but don't warn.
+ if (FSet.findLock(FactMan, UnderCp)) {
+ FSet.removeLock(FactMan, UnderCp);
+ FSet.addLock(FactMan, std::move(UnderEntry));
+ }
+ } else {
+ // We're releasing the underlying mutex, but not destroying the
+ // managing object. Warn on dual release.
+ if (!FSet.findLock(FactMan, UnderCp)) {
+ Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
+ UnlockLoc);
+ }
+ FSet.removeLock(FactMan, UnderCp);
+ FSet.addLock(FactMan, std::move(UnderEntry));
+ }
+ }
+ if (FullyRemove)
+ FSet.removeLock(FactMan, Cp);
+ }
+};
+
/// \brief Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
friend class BuildLockset;
+ llvm::BumpPtrAllocator Bpa;
+ threadSafety::til::MemRegionRef Arena;
+ threadSafety::SExprBuilder SxBuilder;
+
ThreadSafetyHandler &Handler;
+ const CXXMethodDecl *CurrentMethod;
LocalVariableMap LocalVarMap;
FactManager FactMan;
std::vector<CFGBlockInfo> BlockInfo;
public:
- ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}
+ ThreadSafetyAnalyzer(ThreadSafetyHandler &H)
+ : Arena(&Bpa), SxBuilder(Arena), Handler(H) {}
+
+ bool inCurrentScope(const CapabilityExpr &CapE);
- void addLock(FactSet &FSet, const SExpr &Mutex, const LockData &LDat,
- StringRef DiagKind);
- void removeLock(FactSet &FSet, const SExpr &Mutex, SourceLocation UnlockLoc,
- bool FullyRemove, LockKind Kind, StringRef DiagKind);
+ void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
+ StringRef DiagKind, bool ReqAttr = false);
+ void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
+ SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
+ StringRef DiagKind);
template <typename AttrType>
- void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
+ void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
const NamedDecl *D, VarDecl *SelfDecl = nullptr);
template <class AttrType>
- void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
+ void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
const NamedDecl *D,
const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
Expr *BrE, bool Neg);
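LockableFactEntry and ScopedLockableFactEntry now carry the per-kind behavior (end-of-scope warnings, unlock handling) that the old code special-cased through LockData::UnderlyingMutex. The scoped variant models RAII guards; an illustrative annotated fragment, reusing the Mutex, mu, and data names from the earlier sketch:

  class __attribute__((scoped_lockable)) MutexLock {
  public:
    MutexLock(Mutex *m) __attribute__((exclusive_lock_function(m)));
    ~MutexLock()        __attribute__((unlock_function()));
  };

  void guarded() {
    MutexLock guard(&mu);  // the fact for 'guard' records mu as underlying
    data = 2;              // OK: mu is held through the guard
  }                        // the destructor removes 'guard' and releases mu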
@@ -1530,94 +985,107 @@ ClassifyDiagnostic(const AttrTy *A) {
return "mutex";
}
+
+inline bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
+ if (!CurrentMethod)
+ return false;
+ if (auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) {
+ auto *VD = P->clangDecl();
+ if (VD)
+ return VD->getDeclContext() == CurrentMethod->getDeclContext();
+ }
+ return false;
+}
+
+
/// \brief Add a new lock to the lockset, warning if the lock is already there.
-/// \param Mutex -- the Mutex expression for the lock
-/// \param LDat -- the LockData for the lock
-void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const SExpr &Mutex,
- const LockData &LDat, StringRef DiagKind) {
- // FIXME: deal with acquired before/after annotations.
- // FIXME: Don't always warn when we have support for reentrant locks.
- if (Mutex.shouldIgnore())
+/// \param ReqAttr -- true if this is part of an initial Requires attribute.
+void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
+ std::unique_ptr<FactEntry> Entry,
+ StringRef DiagKind, bool ReqAttr) {
+ if (Entry->shouldIgnore())
return;
- if (FSet.findLock(FactMan, Mutex)) {
- if (!LDat.Asserted)
- Handler.handleDoubleLock(DiagKind, Mutex.toString(), LDat.AcquireLoc);
+ if (!ReqAttr && !Entry->negative()) {
+ // look for the negative capability, and remove it from the fact set.
+ CapabilityExpr NegC = !*Entry;
+ FactEntry *Nen = FSet.findLock(FactMan, NegC);
+ if (Nen) {
+ FSet.removeLock(FactMan, NegC);
+ }
+ else {
+ if (inCurrentScope(*Entry) && !Entry->asserted())
+ Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
+ NegC.toString(), Entry->loc());
+ }
+ }
+
+ // FIXME: deal with acquired before/after annotations.
+ // FIXME: Don't always warn when we have support for reentrant locks.
+ if (FSet.findLock(FactMan, *Entry)) {
+ if (!Entry->asserted())
+ Handler.handleDoubleLock(DiagKind, Entry->toString(), Entry->loc());
} else {
- FSet.addLock(FactMan, Mutex, LDat);
+ FSet.addLock(FactMan, std::move(Entry));
}
}
/// \brief Remove a lock from the lockset, warning if the lock is not there.
-/// \param Mutex The lock expression corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
-void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const SExpr &Mutex,
+void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
SourceLocation UnlockLoc,
bool FullyRemove, LockKind ReceivedKind,
StringRef DiagKind) {
- if (Mutex.shouldIgnore())
+ if (Cp.shouldIgnore())
return;
- const LockData *LDat = FSet.findLock(FactMan, Mutex);
+ const FactEntry *LDat = FSet.findLock(FactMan, Cp);
if (!LDat) {
- Handler.handleUnmatchedUnlock(DiagKind, Mutex.toString(), UnlockLoc);
+ Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc);
return;
}
// Generic lock removal doesn't care about lock kind mismatches, but
// otherwise diagnose when the lock kinds are mismatched.
- if (ReceivedKind != LK_Generic && LDat->LKind != ReceivedKind) {
- Handler.handleIncorrectUnlockKind(DiagKind, Mutex.toString(), LDat->LKind,
- ReceivedKind, UnlockLoc);
- return;
+ if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
+ Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(),
+ LDat->kind(), ReceivedKind, UnlockLoc);
}
- if (LDat->UnderlyingMutex.isValid()) {
- // This is scoped lockable object, which manages the real mutex.
- if (FullyRemove) {
- // We're destroying the managing object.
- // Remove the underlying mutex if it exists; but don't warn.
- if (FSet.findLock(FactMan, LDat->UnderlyingMutex))
- FSet.removeLock(FactMan, LDat->UnderlyingMutex);
- } else {
- // We're releasing the underlying mutex, but not destroying the
- // managing object. Warn on dual release.
- if (!FSet.findLock(FactMan, LDat->UnderlyingMutex)) {
- Handler.handleUnmatchedUnlock(
- DiagKind, LDat->UnderlyingMutex.toString(), UnlockLoc);
- }
- FSet.removeLock(FactMan, LDat->UnderlyingMutex);
- return;
- }
- }
- FSet.removeLock(FactMan, Mutex);
+ LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
+ DiagKind);
}
/// \brief Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
-void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
+void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
Expr *Exp, const NamedDecl *D,
VarDecl *SelfDecl) {
if (Attr->args_size() == 0) {
// The mutex held is the "this" object.
- SExpr Mu(nullptr, Exp, D, SelfDecl);
- if (!Mu.isValid())
- SExpr::warnInvalidLock(Handler, nullptr, Exp, D,
- ClassifyDiagnostic(Attr));
- else
- Mtxs.push_back_nodup(Mu);
+ CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
+ if (Cp.isInvalid()) {
+ warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
+ return;
+ }
+ //else
+ if (!Cp.shouldIgnore())
+ Mtxs.push_back_nodup(Cp);
return;
}
for (const auto *Arg : Attr->args()) {
- SExpr Mu(Arg, Exp, D, SelfDecl);
- if (!Mu.isValid())
- SExpr::warnInvalidLock(Handler, Arg, Exp, D, ClassifyDiagnostic(Attr));
- else
- Mtxs.push_back_nodup(Mu);
+ CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
+ if (Cp.isInvalid()) {
+ warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
+ continue;
+ }
+ //else
+ if (!Cp.shouldIgnore())
+ Mtxs.push_back_nodup(Cp);
}
}
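getMutexIDs now funnels attribute arguments through SExprBuilder::translateAttrExpr, keeping the old rule that an attribute with no arguments names the "this" object. An illustrative pair of annotated members (the Container type is hypothetical and continues the earlier sketches):

  struct __attribute__((capability("container"))) Container {
    Mutex mu;
    int items __attribute__((guarded_by(mu)));

    // No attribute argument: the capability acquired is 'this' itself.
    void lockSelf() __attribute__((acquire_capability()));
    // Explicit argument: the capability acquired is 'this->mu'.
    void lockMu()   __attribute__((acquire_capability(mu)));
  };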
@@ -1626,7 +1094,7 @@ void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
-void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
+void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
Expr *Exp, const NamedDecl *D,
const CFGBlock *PredBlock,
const CFGBlock *CurrBlock,
@@ -1758,8 +1226,8 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
if(!FunDecl || !FunDecl->hasAttrs())
return;
- MutexIDList ExclusiveLocksToAdd;
- MutexIDList SharedLocksToAdd;
+ CapExprSet ExclusiveLocksToAdd;
+ CapExprSet SharedLocksToAdd;
// If the condition is a call to a Trylock function, then grab the attributes
for (auto *Attr : FunDecl->getAttrs()) {
@@ -1788,10 +1256,13 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
// Add and remove locks.
SourceLocation Loc = Exp->getExprLoc();
for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
- addLock(Result, ExclusiveLockToAdd, LockData(Loc, LK_Exclusive),
+ addLock(Result, llvm::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
+ LK_Exclusive, Loc),
CapDiagKind);
for (const auto &SharedLockToAdd : SharedLocksToAdd)
- addLock(Result, SharedLockToAdd, LockData(Loc, LK_Shared), CapDiagKind);
+ addLock(Result, llvm::make_unique<LockableFactEntry>(SharedLockToAdd,
+ LK_Shared, Loc),
+ CapDiagKind);
}
/// \brief We use this class to visit different types of expressions in
@@ -1807,16 +1278,17 @@ class BuildLockset : public StmtVisitor<BuildLockset> {
LocalVariableMap::Context LVarCtx;
unsigned CtxIndex;
- // Helper functions
-
+ // helper functions
void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
Expr *MutexExp, ProtectedOperationKind POK,
- StringRef DiagKind);
+ StringRef DiagKind, SourceLocation Loc);
void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
StringRef DiagKind);
- void checkAccess(const Expr *Exp, AccessKind AK);
- void checkPtAccess(const Expr *Exp, AccessKind AK);
+ void checkAccess(const Expr *Exp, AccessKind AK,
+ ProtectedOperationKind POK = POK_VarAccess);
+ void checkPtAccess(const Expr *Exp, AccessKind AK,
+ ProtectedOperationKind POK = POK_VarAccess);
void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);
@@ -1837,62 +1309,87 @@ public:
void VisitDeclStmt(DeclStmt *S);
};
+
/// \brief Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
AccessKind AK, Expr *MutexExp,
ProtectedOperationKind POK,
- StringRef DiagKind) {
+ StringRef DiagKind, SourceLocation Loc) {
LockKind LK = getLockKindFromAccessKind(AK);
- SExpr Mutex(MutexExp, Exp, D);
- if (!Mutex.isValid()) {
- SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D, DiagKind);
+ CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
+ if (Cp.isInvalid()) {
+ warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
return;
- } else if (Mutex.shouldIgnore()) {
+ } else if (Cp.shouldIgnore()) {
+ return;
+ }
+
+ if (Cp.negative()) {
+ // Negative capabilities act like locks excluded
+ FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
+ if (LDat) {
+ Analyzer->Handler.handleFunExcludesLock(
+ DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
+ return;
+ }
+
+ // If this does not refer to a negative capability in the same class,
+ // then stop here.
+ if (!Analyzer->inCurrentScope(Cp))
+ return;
+
+ // Otherwise the negative requirement must be propagated to the caller.
+ LDat = FSet.findLock(Analyzer->FactMan, Cp);
+ if (!LDat) {
+ Analyzer->Handler.handleMutexNotHeld("", D, POK, Cp.toString(),
+ LK_Shared, Loc);
+ }
return;
}
- LockData* LDat = FSet.findLockUniv(Analyzer->FactMan, Mutex);
+ FactEntry* LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
bool NoError = true;
if (!LDat) {
// No exact match found. Look for a partial match.
- FactEntry* FEntry = FSet.findPartialMatch(Analyzer->FactMan, Mutex);
- if (FEntry) {
+ LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp);
+ if (LDat) {
// Warn that there's no precise match.
- LDat = &FEntry->LDat;
- std::string PartMatchStr = FEntry->MutID.toString();
+ std::string PartMatchStr = LDat->toString();
StringRef PartMatchName(PartMatchStr);
- Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(),
- LK, Exp->getExprLoc(),
- &PartMatchName);
+ Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+ LK, Loc, &PartMatchName);
} else {
// Warn that there's no match at all.
- Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(),
- LK, Exp->getExprLoc());
+ Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+ LK, Loc);
}
NoError = false;
}
// Make sure the mutex we found is the right kind.
- if (NoError && LDat && !LDat->isAtLeast(LK))
- Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(), LK,
- Exp->getExprLoc());
+ if (NoError && LDat && !LDat->isAtLeast(LK)) {
+ Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+ LK, Loc);
+ }
}
/// \brief Warn if the LSet contains the given lock.
void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
- Expr *MutexExp,
- StringRef DiagKind) {
- SExpr Mutex(MutexExp, Exp, D);
- if (!Mutex.isValid()) {
- SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D, DiagKind);
+ Expr *MutexExp, StringRef DiagKind) {
+ CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
+ if (Cp.isInvalid()) {
+ warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
+ return;
+ } else if (Cp.shouldIgnore()) {
return;
}
- LockData* LDat = FSet.findLock(Analyzer->FactMan, Mutex);
- if (LDat)
+ FactEntry* LDat = FSet.findLock(Analyzer->FactMan, Cp);
+ if (LDat) {
Analyzer->Handler.handleFunExcludesLock(
- DiagKind, D->getNameAsString(), Mutex.toString(), Exp->getExprLoc());
+ DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc());
+ }
}
/// \brief Checks guarded_by and pt_guarded_by attributes.
@@ -1900,43 +1397,62 @@ void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
-void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK) {
+void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
+ ProtectedOperationKind POK) {
Exp = Exp->IgnoreParenCasts();
+ SourceLocation Loc = Exp->getExprLoc();
+
+ // Local variables of reference type cannot be re-assigned;
+ // map them to their initializer.
+ while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
+ const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
+ if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
+ if (const auto *E = VD->getInit()) {
+ Exp = E;
+ continue;
+ }
+ }
+ break;
+ }
+
if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp)) {
// For dereferences
if (UO->getOpcode() == clang::UO_Deref)
- checkPtAccess(UO->getSubExpr(), AK);
+ checkPtAccess(UO->getSubExpr(), AK, POK);
return;
}
if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
- checkPtAccess(AE->getLHS(), AK);
+ checkPtAccess(AE->getLHS(), AK, POK);
return;
}
if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
if (ME->isArrow())
- checkPtAccess(ME->getBase(), AK);
+ checkPtAccess(ME->getBase(), AK, POK);
else
- checkAccess(ME->getBase(), AK);
+ checkAccess(ME->getBase(), AK, POK);
}
const ValueDecl *D = getValueDecl(Exp);
if (!D || !D->hasAttrs())
return;
- if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty())
- Analyzer->Handler.handleNoMutexHeld("mutex", D, POK_VarAccess, AK,
- Exp->getExprLoc());
+ if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) {
+ Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc);
+ }
for (const auto *I : D->specific_attrs<GuardedByAttr>())
- warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK_VarAccess,
- ClassifyDiagnostic(I));
+ warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK,
+ ClassifyDiagnostic(I), Loc);
}
+
/// \brief Checks pt_guarded_by and pt_guarded_var attributes.
-void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK) {
+/// POK is the same ProtectedOperationKind that was passed to checkAccess.
+void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
+ ProtectedOperationKind POK) {
while (true) {
if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
Exp = PE->getSubExpr();
@@ -1946,7 +1462,7 @@ void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK) {
if (CE->getCastKind() == CK_ArrayToPointerDecay) {
        // If it's an actual array, and not a pointer, then its elements
        // are protected by GUARDED_BY, not PT_GUARDED_BY.
- checkAccess(CE->getSubExpr(), AK);
+ checkAccess(CE->getSubExpr(), AK, POK);
return;
}
Exp = CE->getSubExpr();
@@ -1955,17 +1471,21 @@ void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK) {
break;
}
+ // Pass by reference warnings are under a different flag.
+ ProtectedOperationKind PtPOK = POK_VarDereference;
+ if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef;
+
const ValueDecl *D = getValueDecl(Exp);
if (!D || !D->hasAttrs())
return;
- if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty())
- Analyzer->Handler.handleNoMutexHeld("mutex", D, POK_VarDereference, AK,
+ if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan))
+ Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK,
Exp->getExprLoc());
for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
- warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK_VarDereference,
- ClassifyDiagnostic(I));
+ warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK,
+ ClassifyDiagnostic(I), Exp->getExprLoc());
}
/// \brief Process a function call, method call, constructor call,
@@ -1981,8 +1501,8 @@ void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK) {
void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
SourceLocation Loc = Exp->getExprLoc();
const AttrVec &ArgAttrs = D->getAttrs();
- MutexIDList ExclusiveLocksToAdd, SharedLocksToAdd;
- MutexIDList ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
+ CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
+ CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
StringRef CapDiagKind = "mutex";
for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
@@ -2006,22 +1526,23 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
case attr::AssertExclusiveLock: {
AssertExclusiveLockAttr *A = cast<AssertExclusiveLockAttr>(At);
- MutexIDList AssertLocks;
+ CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
for (const auto &AssertLock : AssertLocks)
- Analyzer->addLock(FSet, AssertLock,
- LockData(Loc, LK_Exclusive, false, true),
+ Analyzer->addLock(FSet,
+ llvm::make_unique<LockableFactEntry>(
+ AssertLock, LK_Exclusive, Loc, false, true),
ClassifyDiagnostic(A));
break;
}
case attr::AssertSharedLock: {
AssertSharedLockAttr *A = cast<AssertSharedLockAttr>(At);
- MutexIDList AssertLocks;
+ CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
for (const auto &AssertLock : AssertLocks)
- Analyzer->addLock(FSet, AssertLock,
- LockData(Loc, LK_Shared, false, true),
+ Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+ AssertLock, LK_Shared, Loc, false, true),
ClassifyDiagnostic(A));
break;
}
@@ -2045,7 +1566,8 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
RequiresCapabilityAttr *A = cast<RequiresCapabilityAttr>(At);
for (auto *Arg : A->args())
warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
- POK_FunctionCall, ClassifyDiagnostic(A));
+ POK_FunctionCall, ClassifyDiagnostic(A),
+ Exp->getExprLoc());
break;
}
@@ -2074,25 +1596,28 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
// Add locks.
for (const auto &M : ExclusiveLocksToAdd)
- Analyzer->addLock(FSet, M, LockData(Loc, LK_Exclusive, isScopedVar),
+ Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+ M, LK_Exclusive, Loc, isScopedVar),
CapDiagKind);
for (const auto &M : SharedLocksToAdd)
- Analyzer->addLock(FSet, M, LockData(Loc, LK_Shared, isScopedVar),
+ Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+ M, LK_Shared, Loc, isScopedVar),
CapDiagKind);
- // Add the managing object as a dummy mutex, mapped to the underlying mutex.
- // FIXME -- this doesn't work if we acquire multiple locks.
if (isScopedVar) {
+ // Add the managing object as a dummy mutex, mapped to the underlying mutex.
SourceLocation MLoc = VD->getLocation();
DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
- SExpr SMutex(&DRE, nullptr, nullptr);
-
- for (const auto &M : ExclusiveLocksToAdd)
- Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Exclusive, M),
- CapDiagKind);
- for (const auto &M : SharedLocksToAdd)
- Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Shared, M),
- CapDiagKind);
+ // FIXME: does this store a pointer to DRE?
+ CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);
+
+ CapExprSet UnderlyingMutexes(ExclusiveLocksToAdd);
+ std::copy(SharedLocksToAdd.begin(), SharedLocksToAdd.end(),
+ std::back_inserter(UnderlyingMutexes));
+ Analyzer->addLock(FSet,
+ llvm::make_unique<ScopedLockableFactEntry>(
+ Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
+ CapDiagKind);
}
// Remove locks.
@@ -2149,6 +1674,9 @@ void BuildLockset::VisitCastExpr(CastExpr *CE) {
void BuildLockset::VisitCallExpr(CallExpr *Exp) {
+ bool ExamineArgs = true;
+ bool OperatorFun = false;
+
if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
MemberExpr *ME = dyn_cast<MemberExpr>(CE->getCallee());
// ME can be null when calling a method pointer
@@ -2169,8 +1697,12 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
}
}
} else if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
- switch (OE->getOperator()) {
+ OperatorFun = true;
+
+ auto OEop = OE->getOperator();
+ switch (OEop) {
case OO_Equal: {
+ ExamineArgs = false;
const Expr *Target = OE->getArg(0);
const Expr *Source = OE->getArg(1);
checkAccess(Target, AK_Written);
@@ -2182,16 +1714,53 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
case OO_Subscript: {
const Expr *Obj = OE->getArg(0);
checkAccess(Obj, AK_Read);
- checkPtAccess(Obj, AK_Read);
+ if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
+ // Grrr. operator* can be multiplication...
+ checkPtAccess(Obj, AK_Read);
+ }
break;
}
default: {
+ // TODO: get rid of this, and rely on pass-by-ref instead.
const Expr *Obj = OE->getArg(0);
checkAccess(Obj, AK_Read);
break;
}
}
}
+
+
+ if (ExamineArgs) {
+ if (FunctionDecl *FD = Exp->getDirectCallee()) {
+ unsigned Fn = FD->getNumParams();
+ unsigned Cn = Exp->getNumArgs();
+ unsigned Skip = 0;
+
+ unsigned i = 0;
+ if (OperatorFun) {
+ if (isa<CXXMethodDecl>(FD)) {
+ // First arg in operator call is implicit self argument,
+ // and doesn't appear in the FunctionDecl.
+ Skip = 1;
+ Cn--;
+ } else {
+ // Ignore the first argument of operators; it's been checked above.
+ i = 1;
+ }
+ }
+ // Ignore default arguments
+ unsigned n = (Fn < Cn) ? Fn : Cn;
+
+ for (; i < n; ++i) {
+ ParmVarDecl* Pvd = FD->getParamDecl(i);
+ Expr* Arg = Exp->getArg(i+Skip);
+ QualType Qt = Pvd->getType();
+ if (Qt->isReferenceType())
+ checkAccess(Arg, AK_Read, POK_PassByRef);
+ }
+ }
+ }
+
NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
if(!D || !D->hasAttrs())
return;
@@ -2254,62 +1823,40 @@ void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
// Find locks in FSet2 that conflict or are not in FSet1, and warn.
for (const auto &Fact : FSet2) {
- const SExpr &FSet2Mutex = FactMan[Fact].MutID;
- const LockData &LDat2 = FactMan[Fact].LDat;
- FactSet::iterator I1 = FSet1.findLockIter(FactMan, FSet2Mutex);
-
- if (I1 != FSet1.end()) {
- const LockData* LDat1 = &FactMan[*I1].LDat;
- if (LDat1->LKind != LDat2.LKind) {
- Handler.handleExclusiveAndShared("mutex", FSet2Mutex.toString(),
- LDat2.AcquireLoc, LDat1->AcquireLoc);
- if (Modify && LDat1->LKind != LK_Exclusive) {
+ const FactEntry *LDat1 = nullptr;
+ const FactEntry *LDat2 = &FactMan[Fact];
+ FactSet::iterator Iter1 = FSet1.findLockIter(FactMan, *LDat2);
+ if (Iter1 != FSet1.end()) LDat1 = &FactMan[*Iter1];
+
+ if (LDat1) {
+ if (LDat1->kind() != LDat2->kind()) {
+ Handler.handleExclusiveAndShared("mutex", LDat2->toString(),
+ LDat2->loc(), LDat1->loc());
+ if (Modify && LDat1->kind() != LK_Exclusive) {
// Take the exclusive lock, which is the one in FSet2.
- *I1 = Fact;
+ *Iter1 = Fact;
}
}
- else if (LDat1->Asserted && !LDat2.Asserted) {
+ else if (Modify && LDat1->asserted() && !LDat2->asserted()) {
// The non-asserted lock in FSet2 is the one we want to track.
- *I1 = Fact;
+ *Iter1 = Fact;
}
} else {
- if (LDat2.UnderlyingMutex.isValid()) {
- if (FSet2.findLock(FactMan, LDat2.UnderlyingMutex)) {
- // If this is a scoped lock that manages another mutex, and if the
- // underlying mutex is still held, then warn about the underlying
- // mutex.
- Handler.handleMutexHeldEndOfScope("mutex",
- LDat2.UnderlyingMutex.toString(),
- LDat2.AcquireLoc, JoinLoc, LEK1);
- }
- }
- else if (!LDat2.Managed && !FSet2Mutex.isUniversal() && !LDat2.Asserted)
- Handler.handleMutexHeldEndOfScope("mutex", FSet2Mutex.toString(),
- LDat2.AcquireLoc, JoinLoc, LEK1);
+ LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
+ Handler);
}
}
// Find locks in FSet1 that are not in FSet2, and remove them.
for (const auto &Fact : FSet1Orig) {
- const SExpr &FSet1Mutex = FactMan[Fact].MutID;
- const LockData &LDat1 = FactMan[Fact].LDat;
-
- if (!FSet2.findLock(FactMan, FSet1Mutex)) {
- if (LDat1.UnderlyingMutex.isValid()) {
- if (FSet1Orig.findLock(FactMan, LDat1.UnderlyingMutex)) {
- // If this is a scoped lock that manages another mutex, and if the
- // underlying mutex is still held, then warn about the underlying
- // mutex.
- Handler.handleMutexHeldEndOfScope("mutex",
- LDat1.UnderlyingMutex.toString(),
- LDat1.AcquireLoc, JoinLoc, LEK1);
- }
- }
- else if (!LDat1.Managed && !FSet1Mutex.isUniversal() && !LDat1.Asserted)
- Handler.handleMutexHeldEndOfScope("mutex", FSet1Mutex.toString(),
- LDat1.AcquireLoc, JoinLoc, LEK2);
+ const FactEntry *LDat1 = &FactMan[Fact];
+ const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);
+
+ if (!LDat2) {
+ LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
+ Handler);
if (Modify)
- FSet1.removeLock(FactMan, FSet1Mutex);
+ FSet1.removeLock(FactMan, *LDat1);
}
}
}
@@ -2348,6 +1895,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
CFG *CFGraph = walker.getGraph();
const NamedDecl *D = walker.getDecl();
+ const FunctionDecl *CurrentFunction = dyn_cast<FunctionDecl>(D);
+ CurrentMethod = dyn_cast<CXXMethodDecl>(D);
if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
return;
@@ -2361,6 +1910,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
if (isa<CXXDestructorDecl>(D))
return; // Don't check inside destructors.
+ Handler.enterFunction(CurrentFunction);
+
BlockInfo.resize(CFGraph->getNumBlockIDs(),
CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
@@ -2379,9 +1930,9 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// Fill in source locations for all CFGBlocks.
findBlockLocations(CFGraph, SortedGraph, BlockInfo);
- MutexIDList ExclusiveLocksAcquired;
- MutexIDList SharedLocksAcquired;
- MutexIDList LocksReleased;
+ CapExprSet ExclusiveLocksAcquired;
+ CapExprSet SharedLocksAcquired;
+ CapExprSet LocksReleased;
// Add locks from exclusive_locks_required and shared_locks_required
// to initial lockset. Also turn off checking for lock and unlock functions.
@@ -2391,8 +1942,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
const AttrVec &ArgAttrs = D->getAttrs();
- MutexIDList ExclusiveLocksToAdd;
- MutexIDList SharedLocksToAdd;
+ CapExprSet ExclusiveLocksToAdd;
+ CapExprSet SharedLocksToAdd;
StringRef CapDiagKind = "mutex";
SourceLocation Loc = D->getLocation();
@@ -2428,12 +1979,14 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
}
// FIXME -- Loc can be wrong here.
- for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
- addLock(InitialLockset, ExclusiveLockToAdd, LockData(Loc, LK_Exclusive),
- CapDiagKind);
- for (const auto &SharedLockToAdd : SharedLocksToAdd)
- addLock(InitialLockset, SharedLockToAdd, LockData(Loc, LK_Shared),
- CapDiagKind);
+ for (const auto &Mu : ExclusiveLocksToAdd)
+ addLock(InitialLockset,
+ llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc),
+ CapDiagKind, true);
+ for (const auto &Mu : SharedLocksToAdd)
+ addLock(InitialLockset,
+ llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc),
+ CapDiagKind, true);
}
for (const auto *CurrBlock : *SortedGraph) {
@@ -2602,11 +2155,11 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// issue the appropriate warning.
// FIXME: the location here is not quite right.
for (const auto &Lock : ExclusiveLocksAcquired)
- ExpectedExitSet.addLock(FactMan, Lock,
- LockData(D->getLocation(), LK_Exclusive));
+ ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+ Lock, LK_Exclusive, D->getLocation()));
for (const auto &Lock : SharedLocksAcquired)
- ExpectedExitSet.addLock(FactMan, Lock,
- LockData(D->getLocation(), LK_Shared));
+ ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+ Lock, LK_Shared, D->getLocation()));
for (const auto &Lock : LocksReleased)
ExpectedExitSet.removeLock(FactMan, Lock);
@@ -2616,13 +2169,10 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
LEK_LockedAtEndOfFunction,
LEK_NotLockedAtEndOfFunction,
false);
-}
-
-} // end anonymous namespace
+ Handler.leaveFunction(CurrentFunction);
+}
-namespace clang {
-namespace thread_safety {
/// \brief Check a function's CFG for thread-safety violations.
///
@@ -2647,4 +2197,4 @@ LockKind getLockKindFromAccessKind(AccessKind AK) {
llvm_unreachable("Unknown AccessKind");
}
-}} // end namespace clang::thread_safety
+}} // end namespace clang::threadSafety
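
Not part of the patch: a minimal, self-contained sketch of the kind of annotated client code the rewritten lockset machinery above analyzes -- GUARDED_BY data, a REQUIRES method checked by warnIfMutexNotHeld, and an RAII lock tracked through a ScopedLockableFactEntry. All class names and macro spellings are illustrative rather than taken from the clang sources; it should compile with a recent clang under -Wthread-safety.

#define CAPABILITY(x)     __attribute__((capability(x)))
#define SCOPED_CAPABILITY __attribute__((scoped_lockable))
#define GUARDED_BY(x)     __attribute__((guarded_by(x)))
#define REQUIRES(...)     __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)      __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)      __attribute__((release_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") Mutex {
  void Lock() ACQUIRE();
  void Unlock() RELEASE();
};

// RAII lock; the analysis models it as a scoped fact managing 'mu'.
struct SCOPED_CAPABILITY MutexLock {
  explicit MutexLock(Mutex &m) ACQUIRE(m) : mu(m) { mu.Lock(); }
  ~MutexLock() RELEASE() { mu.Unlock(); }
  Mutex &mu;
};

struct Counter {
  Mutex mu;
  int n GUARDED_BY(mu);

  void Inc() REQUIRES(mu) { ++n; }   // requirement checked by warnIfMutexNotHeld
  void Bump() {
    MutexLock guard(mu);             // adds the scoped fact (and 'mu') to the lockset
    Inc();                           // OK: 'mu' is held here
  }
  void Broken() { ++n; }             // warning: writing 'n' requires holding 'mu'
};
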
diff --git a/lib/Analysis/ThreadSafetyCommon.cpp b/lib/Analysis/ThreadSafetyCommon.cpp
index da88b78126fa..563e0590aacb 100644
--- a/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/lib/Analysis/ThreadSafetyCommon.cpp
@@ -28,7 +28,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-
#include <algorithm>
#include <climits>
#include <vector>
@@ -63,11 +62,9 @@ std::string getSourceLiteralString(const clang::Expr *CE) {
namespace til {
// Return true if E is a variable that points to an incomplete Phi node.
-static bool isIncompleteVar(const SExpr *E) {
- if (const auto *V = dyn_cast<Variable>(E)) {
- if (const auto *Ph = dyn_cast<Phi>(V->definition()))
- return Ph->status() == Phi::PH_Incomplete;
- }
+static bool isIncompletePhi(const SExpr *E) {
+ if (const auto *Ph = dyn_cast<Phi>(E))
+ return Ph->status() == Phi::PH_Incomplete;
return false;
}
@@ -91,6 +88,124 @@ til::SCFG *SExprBuilder::buildCFG(CFGWalker &Walker) {
}
+
+inline bool isCalleeArrow(const Expr *E) {
+ const MemberExpr *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts());
+ return ME ? ME->isArrow() : false;
+}
+
+
+/// \brief Translate a clang expression in an attribute to a til::SExpr.
+/// Constructs the context from D, DeclExp, and SelfDecl.
+///
+/// \param AttrExp The expression to translate.
+/// \param D The declaration to which the attribute is attached.
+/// \param DeclExp An expression involving the Decl to which the attribute
+/// is attached. E.g. the call to a function.
+CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
+ const NamedDecl *D,
+ const Expr *DeclExp,
+ VarDecl *SelfDecl) {
+ // If we are processing a raw attribute expression, with no substitutions.
+ if (!DeclExp)
+ return translateAttrExpr(AttrExp, nullptr);
+
+ CallingContext Ctx(nullptr, D);
+
+ // Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
+ // for formal parameters when we call buildMutexID later.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
+ Ctx.SelfArg = ME->getBase();
+ Ctx.SelfArrow = ME->isArrow();
+ } else if (const CXXMemberCallExpr *CE =
+ dyn_cast<CXXMemberCallExpr>(DeclExp)) {
+ Ctx.SelfArg = CE->getImplicitObjectArgument();
+ Ctx.SelfArrow = isCalleeArrow(CE->getCallee());
+ Ctx.NumArgs = CE->getNumArgs();
+ Ctx.FunArgs = CE->getArgs();
+ } else if (const CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
+ Ctx.NumArgs = CE->getNumArgs();
+ Ctx.FunArgs = CE->getArgs();
+ } else if (const CXXConstructExpr *CE =
+ dyn_cast<CXXConstructExpr>(DeclExp)) {
+ Ctx.SelfArg = nullptr; // Will be set below
+ Ctx.NumArgs = CE->getNumArgs();
+ Ctx.FunArgs = CE->getArgs();
+ } else if (D && isa<CXXDestructorDecl>(D)) {
+ // There's no such thing as a "destructor call" in the AST.
+ Ctx.SelfArg = DeclExp;
+ }
+
+ // Hack to handle constructors, where self cannot be recovered from
+ // the expression.
+ if (SelfDecl && !Ctx.SelfArg) {
+ DeclRefExpr SelfDRE(SelfDecl, false, SelfDecl->getType(), VK_LValue,
+ SelfDecl->getLocation());
+ Ctx.SelfArg = &SelfDRE;
+
+ // If the attribute has no arguments, then assume the argument is "this".
+ if (!AttrExp)
+ return translateAttrExpr(Ctx.SelfArg, nullptr);
+ else // For most attributes.
+ return translateAttrExpr(AttrExp, &Ctx);
+ }
+
+ // If the attribute has no arguments, then assume the argument is "this".
+ if (!AttrExp)
+ return translateAttrExpr(Ctx.SelfArg, nullptr);
+ else // For most attributes.
+ return translateAttrExpr(AttrExp, &Ctx);
+}
+
+
+/// \brief Translate a clang expression in an attribute to a til::SExpr.
+// This assumes a CallingContext has already been created.
+CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
+ CallingContext *Ctx) {
+ if (!AttrExp)
+ return CapabilityExpr(nullptr, false);
+
+ if (auto* SLit = dyn_cast<StringLiteral>(AttrExp)) {
+ if (SLit->getString() == StringRef("*"))
+ // The "*" expr is a universal lock, which essentially turns off
+ // checks until it is removed from the lockset.
+ return CapabilityExpr(new (Arena) til::Wildcard(), false);
+ else
+ // Ignore other string literals for now.
+ return CapabilityExpr(nullptr, false);
+ }
+
+ bool Neg = false;
+ if (auto *OE = dyn_cast<CXXOperatorCallExpr>(AttrExp)) {
+ if (OE->getOperator() == OO_Exclaim) {
+ Neg = true;
+ AttrExp = OE->getArg(0);
+ }
+ }
+ else if (auto *UO = dyn_cast<UnaryOperator>(AttrExp)) {
+ if (UO->getOpcode() == UO_LNot) {
+ Neg = true;
+ AttrExp = UO->getSubExpr();
+ }
+ }
+
+ til::SExpr *E = translate(AttrExp, Ctx);
+
+ // Trap mutex expressions like nullptr, or 0.
+ // Any literal value is nonsense.
+ if (!E || isa<til::Literal>(E))
+ return CapabilityExpr(nullptr, false);
+
+ // Hack to deal with smart pointers -- strip off top-level pointer casts.
+ if (auto *CE = dyn_cast_or_null<til::Cast>(E)) {
+ if (CE->castOpcode() == til::CAST_objToPtr)
+ return CapabilityExpr(CE->expr(), Neg);
+ }
+ return CapabilityExpr(E, Neg);
+}
+
+
+
// Translate a clang statement or expression to a TIL expression.
// Also performs substitution of variables; Ctx provides the context.
// Dispatches on the type of S.
@@ -125,9 +240,10 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
case Stmt::ArraySubscriptExprClass:
return translateArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Ctx);
case Stmt::ConditionalOperatorClass:
- return translateConditionalOperator(cast<ConditionalOperator>(S), Ctx);
+ return translateAbstractConditionalOperator(
+ cast<ConditionalOperator>(S), Ctx);
case Stmt::BinaryConditionalOperatorClass:
- return translateBinaryConditionalOperator(
+ return translateAbstractConditionalOperator(
cast<BinaryConditionalOperator>(S), Ctx);
// We treat these as no-ops
@@ -162,6 +278,7 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
}
+
til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
CallingContext *Ctx) {
const ValueDecl *VD = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
@@ -197,17 +314,75 @@ til::SExpr *SExprBuilder::translateCXXThisExpr(const CXXThisExpr *TE,
}
+const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) {
+ if (auto *V = dyn_cast<til::Variable>(E))
+ return V->clangDecl();
+ if (auto *Ph = dyn_cast<til::Phi>(E))
+ return Ph->clangDecl();
+ if (auto *P = dyn_cast<til::Project>(E))
+ return P->clangDecl();
+ if (auto *L = dyn_cast<til::LiteralPtr>(E))
+ return L->clangDecl();
+ return 0;
+}
+
+bool hasCppPointerType(const til::SExpr *E) {
+ auto *VD = getValueDeclFromSExpr(E);
+ if (VD && VD->getType()->isPointerType())
+ return true;
+ if (auto *C = dyn_cast<til::Cast>(E))
+ return C->castOpcode() == til::CAST_objToPtr;
+
+ return false;
+}
+
+
+// Grab the very first declaration of virtual method D
+const CXXMethodDecl* getFirstVirtualDecl(const CXXMethodDecl *D) {
+ while (true) {
+ D = D->getCanonicalDecl();
+ CXXMethodDecl::method_iterator I = D->begin_overridden_methods(),
+ E = D->end_overridden_methods();
+ if (I == E)
+ return D; // Method does not override anything
+ D = *I; // FIXME: this does not work with multiple inheritance.
+ }
+ return nullptr;
+}
+
til::SExpr *SExprBuilder::translateMemberExpr(const MemberExpr *ME,
CallingContext *Ctx) {
- til::SExpr *E = translate(ME->getBase(), Ctx);
- E = new (Arena) til::SApply(E);
- return new (Arena) til::Project(E, ME->getMemberDecl());
+ til::SExpr *BE = translate(ME->getBase(), Ctx);
+ til::SExpr *E = new (Arena) til::SApply(BE);
+
+ const ValueDecl *D = ME->getMemberDecl();
+ if (auto *VD = dyn_cast<CXXMethodDecl>(D))
+ D = getFirstVirtualDecl(VD);
+
+ til::Project *P = new (Arena) til::Project(E, D);
+ if (hasCppPointerType(BE))
+ P->setArrow(true);
+ return P;
}
til::SExpr *SExprBuilder::translateCallExpr(const CallExpr *CE,
- CallingContext *Ctx) {
- // TODO -- Lock returned
+ CallingContext *Ctx,
+ const Expr *SelfE) {
+ if (CapabilityExprMode) {
+ // Handle LOCK_RETURNED
+ const FunctionDecl *FD = CE->getDirectCallee()->getMostRecentDecl();
+ if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) {
+ CallingContext LRCallCtx(Ctx);
+ LRCallCtx.AttrDecl = CE->getDirectCallee();
+ LRCallCtx.SelfArg = SelfE;
+ LRCallCtx.NumArgs = CE->getNumArgs();
+ LRCallCtx.FunArgs = CE->getArgs();
+ return const_cast<til::SExpr*>(
+ translateAttrExpr(At->getArg(), &LRCallCtx).sexpr());
+ }
+ }
+
til::SExpr *E = translate(CE->getCallee(), Ctx);
for (const auto *Arg : CE->arguments()) {
til::SExpr *A = translate(Arg, Ctx);
@@ -219,12 +394,31 @@ til::SExpr *SExprBuilder::translateCallExpr(const CallExpr *CE,
til::SExpr *SExprBuilder::translateCXXMemberCallExpr(
const CXXMemberCallExpr *ME, CallingContext *Ctx) {
- return translateCallExpr(cast<CallExpr>(ME), Ctx);
+ if (CapabilityExprMode) {
+ // Ignore calls to get() on smart pointers.
+ if (ME->getMethodDecl()->getNameAsString() == "get" &&
+ ME->getNumArgs() == 0) {
+ auto *E = translate(ME->getImplicitObjectArgument(), Ctx);
+ return new (Arena) til::Cast(til::CAST_objToPtr, E);
+ // return E;
+ }
+ }
+ return translateCallExpr(cast<CallExpr>(ME), Ctx,
+ ME->getImplicitObjectArgument());
}
til::SExpr *SExprBuilder::translateCXXOperatorCallExpr(
const CXXOperatorCallExpr *OCE, CallingContext *Ctx) {
+ if (CapabilityExprMode) {
+ // Ignore operator * and operator -> on smart pointers.
+ OverloadedOperatorKind k = OCE->getOperator();
+ if (k == OO_Star || k == OO_Arrow) {
+ auto *E = translate(OCE->getArg(0), Ctx);
+ return new (Arena) til::Cast(til::CAST_objToPtr, E);
+ // return E;
+ }
+ }
return translateCallExpr(cast<CallExpr>(OCE), Ctx);
}
@@ -238,8 +432,23 @@ til::SExpr *SExprBuilder::translateUnaryOperator(const UnaryOperator *UO,
case UO_PreDec:
return new (Arena) til::Undefined(UO);
+ case UO_AddrOf: {
+ if (CapabilityExprMode) {
+ // interpret &Graph::mu_ as an existential.
+ if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr())) {
+ if (DRE->getDecl()->isCXXInstanceMember()) {
+ // This is a pointer-to-member expression, e.g. &MyClass::mu_.
+ // We interpret this syntax specially, as a wildcard.
+ auto *W = new (Arena) til::Wildcard();
+ return new (Arena) til::Project(W, DRE->getDecl());
+ }
+ }
+ }
+ // otherwise, & is a no-op
+ return translate(UO->getSubExpr(), Ctx);
+ }
+
// We treat these as no-ops
- case UO_AddrOf:
case UO_Deref:
case UO_Plus:
return translate(UO->getSubExpr(), Ctx);
@@ -360,7 +569,9 @@ til::SExpr *SExprBuilder::translateCastExpr(const CastExpr *CE,
return E0;
}
til::SExpr *E0 = translate(CE->getSubExpr(), Ctx);
- return new (Arena) til::Load(E0);
+ return E0;
+ // FIXME!! -- get Load working properly
+ // return new (Arena) til::Load(E0);
}
case CK_NoOp:
case CK_DerivedToBase:
@@ -373,6 +584,8 @@ til::SExpr *SExprBuilder::translateCastExpr(const CastExpr *CE,
default: {
// FIXME: handle different kinds of casts.
til::SExpr *E0 = translate(CE->getSubExpr(), Ctx);
+ if (CapabilityExprMode)
+ return E0;
return new (Arena) til::Cast(til::CAST_none, E0);
}
}
@@ -389,15 +602,12 @@ SExprBuilder::translateArraySubscriptExpr(const ArraySubscriptExpr *E,
til::SExpr *
-SExprBuilder::translateConditionalOperator(const ConditionalOperator *C,
- CallingContext *Ctx) {
- return new (Arena) til::Undefined(C);
-}
-
-
-til::SExpr *SExprBuilder::translateBinaryConditionalOperator(
- const BinaryConditionalOperator *C, CallingContext *Ctx) {
- return new (Arena) til::Undefined(C);
+SExprBuilder::translateAbstractConditionalOperator(
+ const AbstractConditionalOperator *CO, CallingContext *Ctx) {
+ auto *C = translate(CO->getCond(), Ctx);
+ auto *T = translate(CO->getTrueExpr(), Ctx);
+ auto *E = translate(CO->getFalseExpr(), Ctx);
+ return new (Arena) til::IfThenElse(C, T, E);
}
@@ -430,16 +640,14 @@ SExprBuilder::translateDeclStmt(const DeclStmt *S, CallingContext *Ctx) {
// If E is trivial returns E.
til::SExpr *SExprBuilder::addStatement(til::SExpr* E, const Stmt *S,
const ValueDecl *VD) {
- if (!E)
- return nullptr;
- if (til::ThreadSafetyTIL::isTrivial(E))
+ if (!E || !CurrentBB || E->block() || til::ThreadSafetyTIL::isTrivial(E))
return E;
-
- til::Variable *V = new (Arena) til::Variable(E, VD);
- CurrentInstructions.push_back(V);
+ if (VD)
+ E = new (Arena) til::Variable(E, VD);
+ CurrentInstructions.push_back(E);
if (S)
- insertStmt(S, V);
- return V;
+ insertStmt(S, E);
+ return E;
}
@@ -496,11 +704,11 @@ void SExprBuilder::makePhiNodeVar(unsigned i, unsigned NPreds, til::SExpr *E) {
unsigned ArgIndex = CurrentBlockInfo->ProcessedPredecessors;
assert(ArgIndex > 0 && ArgIndex < NPreds);
- til::Variable *V = dyn_cast<til::Variable>(CurrentLVarMap[i].second);
- if (V && V->getBlockID() == CurrentBB->blockID()) {
+ til::SExpr *CurrE = CurrentLVarMap[i].second;
+ if (CurrE->block() == CurrentBB) {
// We already have a Phi node in the current block,
// so just add the new variable to the Phi node.
- til::Phi *Ph = dyn_cast<til::Phi>(V->definition());
+ til::Phi *Ph = dyn_cast<til::Phi>(CurrE);
assert(Ph && "Expecting Phi node.");
if (E)
Ph->values()[ArgIndex] = E;
@@ -509,27 +717,26 @@ void SExprBuilder::makePhiNodeVar(unsigned i, unsigned NPreds, til::SExpr *E) {
// Make a new phi node: phi(..., E)
// All phi args up to the current index are set to the current value.
- til::SExpr *CurrE = CurrentLVarMap[i].second;
til::Phi *Ph = new (Arena) til::Phi(Arena, NPreds);
Ph->values().setValues(NPreds, nullptr);
for (unsigned PIdx = 0; PIdx < ArgIndex; ++PIdx)
Ph->values()[PIdx] = CurrE;
if (E)
Ph->values()[ArgIndex] = E;
+ Ph->setClangDecl(CurrentLVarMap[i].first);
// If E is from a back-edge, or either E or CurrE are incomplete, then
// mark this node as incomplete; we may need to remove it later.
- if (!E || isIncompleteVar(E) || isIncompleteVar(CurrE)) {
+ if (!E || isIncompletePhi(E) || isIncompletePhi(CurrE)) {
Ph->setStatus(til::Phi::PH_Incomplete);
}
// Add Phi node to current block, and update CurrentLVarMap[i]
- auto *Var = new (Arena) til::Variable(Ph, CurrentLVarMap[i].first);
- CurrentArguments.push_back(Var);
+ CurrentArguments.push_back(Ph);
if (Ph->status() == til::Phi::PH_Incomplete)
- IncompleteArgs.push_back(Var);
+ IncompleteArgs.push_back(Ph);
CurrentLVarMap.makeWritable();
- CurrentLVarMap.elem(i).second = Var;
+ CurrentLVarMap.elem(i).second = Ph;
}
@@ -603,15 +810,13 @@ void SExprBuilder::mergePhiNodesBackEdge(const CFGBlock *Blk) {
unsigned ArgIndex = BBInfo[Blk->getBlockID()].ProcessedPredecessors;
assert(ArgIndex > 0 && ArgIndex < BB->numPredecessors());
- for (til::Variable *V : BB->arguments()) {
- til::Phi *Ph = dyn_cast_or_null<til::Phi>(V->definition());
+ for (til::SExpr *PE : BB->arguments()) {
+ til::Phi *Ph = dyn_cast_or_null<til::Phi>(PE);
assert(Ph && "Expecting Phi Node.");
assert(Ph->values()[ArgIndex] == nullptr && "Wrong index for back edge.");
- assert(V->clangDecl() && "No local variable for Phi node.");
- til::SExpr *E = lookupVarDecl(V->clangDecl());
+ til::SExpr *E = lookupVarDecl(Ph->clangDecl());
assert(E && "Couldn't find local variable for Phi node.");
-
Ph->values()[ArgIndex] = E;
}
}
@@ -631,7 +836,6 @@ void SExprBuilder::enterCFG(CFG *Cfg, const NamedDecl *D,
BB->reserveInstructions(B->size());
BlockMap[B->getBlockID()] = BB;
}
- CallCtx.reset(new SExprBuilder::CallingContext(D));
CurrentBB = lookupBlock(&Cfg->getEntry());
auto Parms = isa<ObjCMethodDecl>(D) ? cast<ObjCMethodDecl>(D)->parameters()
@@ -691,13 +895,13 @@ void SExprBuilder::enterCFGBlockBody(const CFGBlock *B) {
// Push those arguments onto the basic block.
CurrentBB->arguments().reserve(
static_cast<unsigned>(CurrentArguments.size()), Arena);
- for (auto *V : CurrentArguments)
- CurrentBB->addArgument(V);
+ for (auto *A : CurrentArguments)
+ CurrentBB->addArgument(A);
}
void SExprBuilder::handleStatement(const Stmt *S) {
- til::SExpr *E = translate(S, CallCtx.get());
+ til::SExpr *E = translate(S, nullptr);
addStatement(E, S);
}
@@ -726,17 +930,16 @@ void SExprBuilder::exitCFGBlockBody(const CFGBlock *B) {
til::BasicBlock *BB = *It ? lookupBlock(*It) : nullptr;
// TODO: set index
unsigned Idx = BB ? BB->findPredecessorIndex(CurrentBB) : 0;
- til::SExpr *Tm = new (Arena) til::Goto(BB, Idx);
+ auto *Tm = new (Arena) til::Goto(BB, Idx);
CurrentBB->setTerminator(Tm);
}
else if (N == 2) {
- til::SExpr *C = translate(B->getTerminatorCondition(true), CallCtx.get());
+ til::SExpr *C = translate(B->getTerminatorCondition(true), nullptr);
til::BasicBlock *BB1 = *It ? lookupBlock(*It) : nullptr;
++It;
til::BasicBlock *BB2 = *It ? lookupBlock(*It) : nullptr;
- unsigned Idx1 = BB1 ? BB1->findPredecessorIndex(CurrentBB) : 0;
- unsigned Idx2 = BB2 ? BB2->findPredecessorIndex(CurrentBB) : 0;
- til::SExpr *Tm = new (Arena) til::Branch(C, BB1, BB2, Idx1, Idx2);
+    // FIXME: make sure these aren't critical edges.
+ auto *Tm = new (Arena) til::Branch(C, BB1, BB2);
CurrentBB->setTerminator(Tm);
}
}
@@ -763,10 +966,9 @@ void SExprBuilder::exitCFGBlock(const CFGBlock *B) {
void SExprBuilder::exitCFG(const CFGBlock *Last) {
- for (auto *V : IncompleteArgs) {
- til::Phi *Ph = dyn_cast<til::Phi>(V->definition());
- if (Ph && Ph->status() == til::Phi::PH_Incomplete)
- simplifyIncompleteArg(V, Ph);
+ for (auto *Ph : IncompleteArgs) {
+ if (Ph->status() == til::Phi::PH_Incomplete)
+ simplifyIncompleteArg(Ph);
}
CurrentArguments.clear();
@@ -775,18 +977,15 @@ void SExprBuilder::exitCFG(const CFGBlock *Last) {
}
-
-class TILPrinter : public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
-
-
+/*
void printSCFG(CFGWalker &Walker) {
llvm::BumpPtrAllocator Bpa;
til::MemRegionRef Arena(&Bpa);
- SExprBuilder builder(Arena);
- til::SCFG *Cfg = builder.buildCFG(Walker);
- TILPrinter::print(Cfg, llvm::errs());
+ SExprBuilder SxBuilder(Arena);
+ til::SCFG *Scfg = SxBuilder.buildCFG(Walker);
+ TILPrinter::print(Scfg, llvm::errs());
}
-
+*/
} // end namespace threadSafety
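
Not part of the patch: hypothetical attribute arguments of the shapes translateAttrExpr above has to turn into CapabilityExprs -- a plain member capability, the "*" wildcard string, a '!'-negated capability, and a pointer-to-member such as &Graph::mu_ (treated as an existential by the UO_AddrOf case). Macro spellings and names are illustrative only.

#define CAPABILITY(x) __attribute__((capability(x)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") Mutex {};

struct Graph {
  Mutex mu_;
  int weight_ GUARDED_BY(mu_);

  void rebalance() REQUIRES(mu_);  // member capability -> til::Project on mu_
  void audit()     REQUIRES("*");  // "*" string literal -> til::Wildcard (checks off)
  void prepare()   REQUIRES(!mu_); // '!'                -> negative CapabilityExpr
};

// A pointer-to-member argument names "some Graph's mu_" rather than one
// particular object's mutex.
void touchAll(Graph &g) REQUIRES(&Graph::mu_);
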
diff --git a/lib/Analysis/ThreadSafetyTIL.cpp b/lib/Analysis/ThreadSafetyTIL.cpp
index f67cbb90e5ab..ebe374ea609d 100644
--- a/lib/Analysis/ThreadSafetyTIL.cpp
+++ b/lib/Analysis/ThreadSafetyTIL.cpp
@@ -48,12 +48,20 @@ StringRef getBinaryOpcodeString(TIL_BinaryOpcode Op) {
}
+SExpr* Future::force() {
+ Status = FS_evaluating;
+ Result = compute();
+ Status = FS_done;
+ return Result;
+}
+
+
unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
unsigned Idx = Predecessors.size();
Predecessors.reserveCheck(1, Arena);
Predecessors.push_back(Pred);
- for (Variable *V : Args) {
- if (Phi* Ph = dyn_cast<Phi>(V->definition())) {
+ for (SExpr *E : Args) {
+ if (Phi* Ph = dyn_cast<Phi>(E)) {
Ph->values().reserveCheck(1, Arena);
Ph->values().push_back(nullptr);
}
@@ -61,93 +69,275 @@ unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
return Idx;
}
+
void BasicBlock::reservePredecessors(unsigned NumPreds) {
Predecessors.reserve(NumPreds, Arena);
- for (Variable *V : Args) {
- if (Phi* Ph = dyn_cast<Phi>(V->definition())) {
+ for (SExpr *E : Args) {
+ if (Phi* Ph = dyn_cast<Phi>(E)) {
Ph->values().reserve(NumPreds, Arena);
}
}
}
-void BasicBlock::renumberVars() {
- unsigned VID = 0;
- for (Variable *V : Args) {
- V->setID(BlockID, VID++);
- }
- for (Variable *V : Instrs) {
- V->setID(BlockID, VID++);
- }
-}
-void SCFG::renumberVars() {
- for (BasicBlock *B : Blocks) {
- B->renumberVars();
+// If E is a variable, then trace back through any aliases or redundant
+// Phi nodes to find the canonical definition.
+const SExpr *getCanonicalVal(const SExpr *E) {
+ while (true) {
+ if (auto *V = dyn_cast<Variable>(E)) {
+ if (V->kind() == Variable::VK_Let) {
+ E = V->definition();
+ continue;
+ }
+ }
+ if (const Phi *Ph = dyn_cast<Phi>(E)) {
+ if (Ph->status() == Phi::PH_SingleVal) {
+ E = Ph->values()[0];
+ continue;
+ }
+ }
+ break;
}
+ return E;
}
-
-
// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
-SExpr *getCanonicalVal(SExpr *E) {
- while (auto *V = dyn_cast<Variable>(E)) {
- SExpr *D;
- do {
+// The non-const version will simplify incomplete Phi nodes.
+SExpr *simplifyToCanonicalVal(SExpr *E) {
+ while (true) {
+ if (auto *V = dyn_cast<Variable>(E)) {
if (V->kind() != Variable::VK_Let)
return V;
- D = V->definition();
- auto *V2 = dyn_cast<Variable>(D);
- if (V2)
- V = V2;
- else
- break;
- } while (true);
-
- if (ThreadSafetyTIL::isTrivial(D))
- return D;
-
- if (Phi *Ph = dyn_cast<Phi>(D)) {
+ // Eliminate redundant variables, e.g. x = y, or x = 5,
+ // but keep anything more complicated.
+ if (til::ThreadSafetyTIL::isTrivial(V->definition())) {
+ E = V->definition();
+ continue;
+ }
+ return V;
+ }
+ if (auto *Ph = dyn_cast<Phi>(E)) {
if (Ph->status() == Phi::PH_Incomplete)
- simplifyIncompleteArg(V, Ph);
-
+ simplifyIncompleteArg(Ph);
+ // Eliminate redundant Phi nodes.
if (Ph->status() == Phi::PH_SingleVal) {
E = Ph->values()[0];
continue;
}
}
- return V;
+ return E;
}
- return E;
}
// Trace the arguments of an incomplete Phi node to see if they have the same
// canonical definition. If so, mark the Phi node as redundant.
// simplifyToCanonicalVal() will recursively call simplifyIncompleteArg().
-void simplifyIncompleteArg(Variable *V, til::Phi *Ph) {
+void simplifyIncompleteArg(til::Phi *Ph) {
assert(Ph && Ph->status() == Phi::PH_Incomplete);
// eliminate infinite recursion -- assume that this node is not redundant.
Ph->setStatus(Phi::PH_MultiVal);
- SExpr *E0 = getCanonicalVal(Ph->values()[0]);
+ SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
for (unsigned i=1, n=Ph->values().size(); i<n; ++i) {
- SExpr *Ei = getCanonicalVal(Ph->values()[i]);
- if (Ei == V)
+ SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
+ if (Ei == Ph)
continue; // Recursive reference to itself. Don't count.
if (Ei != E0) {
return; // Status is already set to MultiVal.
}
}
Ph->setStatus(Phi::PH_SingleVal);
- // Eliminate Redundant Phi node.
- V->setDefinition(Ph->values()[0]);
}
+// Renumbers the arguments and instructions to have unique, sequential IDs.
+int BasicBlock::renumberInstrs(int ID) {
+ for (auto *Arg : Args)
+ Arg->setID(this, ID++);
+ for (auto *Instr : Instrs)
+ Instr->setID(this, ID++);
+ TermInstr->setID(this, ID++);
+ return ID;
+}
+
+// Sorts the CFG's blocks using a reverse post-order depth-first traversal.
+// Each block will be written into the Blocks array in order, and its BlockID
+// will be set to the index in the array. Sorting should start from the entry
+// block, and ID should be the total number of blocks.
+int BasicBlock::topologicalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
+ if (Visited) return ID;
+ Visited = true;
+ for (auto *Block : successors())
+ ID = Block->topologicalSort(Blocks, ID);
+ // set ID and update block array in place.
+ // We may lose pointers to unreachable blocks.
+ assert(ID > 0);
+ BlockID = --ID;
+ Blocks[BlockID] = this;
+ return ID;
+}
+
+// Performs a reverse topological traversal, starting from the exit block and
+// following back-edges. The dominator is serialized before any predecessors,
+// which guarantees that all blocks are serialized after their dominator and
+// before their post-dominator (because it's a reverse topological traversal).
+// ID should be initially set to 0.
+//
+// This sort assumes that (1) dominators have been computed, (2) there are no
+// critical edges, and (3) the entry block is reachable from the exit block
+// and no blocks are accessible via traversal of back-edges from the exit that
+// weren't accessible via forward edges from the entry.
+int BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
+ // Visited is assumed to have been set by the topologicalSort. This pass
+ // assumes !Visited means that we've visited this node before.
+ if (!Visited) return ID;
+ Visited = false;
+ if (DominatorNode.Parent)
+ ID = DominatorNode.Parent->topologicalFinalSort(Blocks, ID);
+ for (auto *Pred : Predecessors)
+ ID = Pred->topologicalFinalSort(Blocks, ID);
+ assert(static_cast<size_t>(ID) < Blocks.size());
+ BlockID = ID++;
+ Blocks[BlockID] = this;
+ return ID;
+}
+
+// Computes the immediate dominator of the current block. Assumes that all of
+// its predecessors have already computed their dominators. This is achieved
+// by visiting the nodes in topological order.
+void BasicBlock::computeDominator() {
+ BasicBlock *Candidate = nullptr;
+ // Walk backwards from each predecessor to find the common dominator node.
+ for (auto *Pred : Predecessors) {
+ // Skip back-edges
+ if (Pred->BlockID >= BlockID) continue;
+    // If we don't have a candidate for the dominator yet, take this one.
+ if (Candidate == nullptr) {
+ Candidate = Pred;
+ continue;
+ }
+ // Walk the alternate and current candidate back to find a common ancestor.
+ auto *Alternate = Pred;
+ while (Alternate != Candidate) {
+ if (Candidate->BlockID > Alternate->BlockID)
+ Candidate = Candidate->DominatorNode.Parent;
+ else
+ Alternate = Alternate->DominatorNode.Parent;
+ }
+ }
+ DominatorNode.Parent = Candidate;
+ DominatorNode.SizeOfSubTree = 1;
+}
+
+// Computes the immediate post-dominator of the current block. Assumes that all
+// of its successors have already computed their post-dominators. This is
+// achieved by visiting the nodes in reverse topological order.
+void BasicBlock::computePostDominator() {
+ BasicBlock *Candidate = nullptr;
+  // Walk back from each successor to find the common post-dominator node.
+ for (auto *Succ : successors()) {
+ // Skip back-edges
+ if (Succ->BlockID <= BlockID) continue;
+    // If we don't have a candidate for the post-dominator yet, take this one.
+ if (Candidate == nullptr) {
+ Candidate = Succ;
+ continue;
+ }
+ // Walk the alternate and current candidate back to find a common ancestor.
+ auto *Alternate = Succ;
+ while (Alternate != Candidate) {
+ if (Candidate->BlockID < Alternate->BlockID)
+ Candidate = Candidate->PostDominatorNode.Parent;
+ else
+ Alternate = Alternate->PostDominatorNode.Parent;
+ }
+ }
+ PostDominatorNode.Parent = Candidate;
+ PostDominatorNode.SizeOfSubTree = 1;
+}
+
+
+// Renumber instructions in all blocks
+void SCFG::renumberInstrs() {
+ int InstrID = 0;
+ for (auto *Block : Blocks)
+ InstrID = Block->renumberInstrs(InstrID);
+}
+
+
+static inline void computeNodeSize(BasicBlock *B,
+ BasicBlock::TopologyNode BasicBlock::*TN) {
+ BasicBlock::TopologyNode *N = &(B->*TN);
+ if (N->Parent) {
+ BasicBlock::TopologyNode *P = &(N->Parent->*TN);
+ // Initially set ID relative to the (as yet uncomputed) parent ID
+ N->NodeID = P->SizeOfSubTree;
+ P->SizeOfSubTree += N->SizeOfSubTree;
+ }
+}
+
+static inline void computeNodeID(BasicBlock *B,
+ BasicBlock::TopologyNode BasicBlock::*TN) {
+ BasicBlock::TopologyNode *N = &(B->*TN);
+ if (N->Parent) {
+ BasicBlock::TopologyNode *P = &(N->Parent->*TN);
+ N->NodeID += P->NodeID; // Fix NodeIDs relative to starting node.
+ }
+}
+
+
+// Normalizes a CFG. Normalization has a few major components:
+// 1) Removing unreachable blocks.
+// 2) Computing dominators and post-dominators
+// 3) Topologically sorting the blocks into the "Blocks" array.
+void SCFG::computeNormalForm() {
+ // Topologically sort the blocks starting from the entry block.
+ int NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
+ if (NumUnreachableBlocks > 0) {
+    // If there were unreachable blocks, shift everything down and delete them.
+ for (size_t I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
+ size_t NI = I - NumUnreachableBlocks;
+ Blocks[NI] = Blocks[I];
+ Blocks[NI]->BlockID = NI;
+ // FIXME: clean up predecessor pointers to unreachable blocks?
+ }
+ Blocks.drop(NumUnreachableBlocks);
+ }
+
+ // Compute dominators.
+ for (auto *Block : Blocks)
+ Block->computeDominator();
+
+ // Once dominators have been computed, the final sort may be performed.
+ int NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
+ assert(static_cast<size_t>(NumBlocks) == Blocks.size());
+ (void) NumBlocks;
+
+ // Renumber the instructions now that we have a final sort.
+ renumberInstrs();
+
+ // Compute post-dominators and compute the sizes of each node in the
+ // dominator tree.
+ for (auto *Block : Blocks.reverse()) {
+ Block->computePostDominator();
+ computeNodeSize(Block, &BasicBlock::DominatorNode);
+ }
+ // Compute the sizes of each node in the post-dominator tree and assign IDs in
+ // the dominator tree.
+ for (auto *Block : Blocks) {
+ computeNodeID(Block, &BasicBlock::DominatorNode);
+ computeNodeSize(Block, &BasicBlock::PostDominatorNode);
+ }
+ // Assign IDs in the post-dominator tree.
+ for (auto *Block : Blocks.reverse()) {
+ computeNodeID(Block, &BasicBlock::PostDominatorNode);
+ }
+}
+
} // end namespace til
} // end namespace threadSafety
} // end namespace clang
-
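
Not part of the patch: a toy, self-contained rendition of the candidate-walking step used by computeDominator() above (essentially the Cooper-Harvey-Kennedy intersection over reverse-post-order block IDs). The struct, the tiny diamond CFG, and all names are illustrative only.

#include <cassert>

struct Block {
  int BlockID;   // reverse post-order index; the entry block gets 0
  Block *IDom;   // immediate dominator; the entry points at itself
};

// Hoist whichever candidate has the larger (i.e. later) BlockID to its
// immediate dominator until the two chains meet, just as computeDominator()
// does with Candidate/Alternate.
static Block *intersect(Block *A, Block *B) {
  while (A != B) {
    if (A->BlockID > B->BlockID)
      A = A->IDom;
    else
      B = B->IDom;
  }
  return A;
}

int main() {
  // Diamond CFG: 0 -> {1, 2} -> 3.
  Block Entry{0, nullptr}, Then{1, nullptr}, Else{2, nullptr}, Join{3, nullptr};
  Entry.IDom = &Entry;
  Then.IDom = &Entry;
  Else.IDom = &Entry;
  // The join block's immediate dominator is the meet of its predecessors.
  Join.IDom = intersect(&Then, &Else);
  assert(Join.IDom == &Entry);
  return 0;
}
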
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
index f5c786ac3456..61a259217d7d 100644
--- a/lib/Analysis/UninitializedValues.cpp
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -360,13 +360,28 @@ void ClassifyRefs::classify(const Expr *E, Class C) {
// The result of a ?: could also be an lvalue.
E = E->IgnoreParens();
if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
- const Expr *TrueExpr = CO->getTrueExpr();
- if (!isa<OpaqueValueExpr>(TrueExpr))
- classify(TrueExpr, C);
+ classify(CO->getTrueExpr(), C);
classify(CO->getFalseExpr(), C);
return;
}
+ if (const BinaryConditionalOperator *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ classify(BCO->getFalseExpr(), C);
+ return;
+ }
+
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ classify(OVE->getSourceExpr(), C);
+ return;
+ }
+
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma)
+ classify(BO->getRHS(), C);
+ return;
+ }
+
FindVarResult Var = findVar(E, DC);
if (const DeclRefExpr *DRE = Var.getDeclRefExpr())
Classification[DRE] = std::max(Classification[DRE], C);
@@ -401,6 +416,17 @@ void ClassifyRefs::VisitUnaryOperator(UnaryOperator *UO) {
}
void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
+ // Classify arguments to std::move as used.
+ if (CE->getNumArgs() == 1) {
+ if (FunctionDecl *FD = CE->getDirectCallee()) {
+ if (FD->isInStdNamespace() && FD->getIdentifier() &&
+ FD->getIdentifier()->isStr("move")) {
+ classify(CE->getArg(0), Use);
+ return;
+ }
+ }
+ }
+
// If a value is passed by const reference to a function, we should not assume
// that it is initialized by the call, and we conservatively do not assume
// that it is used.
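
Not part of the patch: a tiny illustration of the std::move special case above. Previously the argument was treated as a by-reference call argument that the callee might initialize, so no diagnostic was issued; with the change the operand of std::move is classified as a use, and -Wuninitialized can fire on it.

#include <utility>

int consume() {
  int x;                 // never initialized
  int y = std::move(x);  // now classified as a Use of 'x', so -Wuninitialized
                         // can warn about reading 'x' here
  return y;
}
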
diff --git a/lib/Basic/Attributes.cpp b/lib/Basic/Attributes.cpp
index a05ad053db04..da9ac793f163 100644
--- a/lib/Basic/Attributes.cpp
+++ b/lib/Basic/Attributes.cpp
@@ -3,7 +3,7 @@
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
-bool clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
+int clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
const IdentifierInfo *Attr, const llvm::Triple &T,
const LangOptions &LangOpts) {
StringRef Name = Attr->getName();
@@ -13,5 +13,5 @@ bool clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
#include "clang/Basic/AttrHasAttributeImpl.inc"
- return false;
+ return 0;
}
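
Not part of the patch: hasAttribute() now reports an int instead of a bool, which (presumably, given that the generated AttrHasAttributeImpl.inc returns numeric values) lets the __has_attribute family of feature-test macros expand to a version value rather than just 0 or 1. A generic, illustrative use from client code:

#if defined(__has_cpp_attribute)
#  if __has_cpp_attribute(deprecated) >= 201309  // a value, not merely "true"
#    define MY_DEPRECATED [[deprecated]]
#  endif
#endif
#ifndef MY_DEPRECATED
#  define MY_DEPRECATED
#endif

MY_DEPRECATED int oldApi();
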
diff --git a/lib/Basic/CMakeLists.txt b/lib/Basic/CMakeLists.txt
index 0df82b3bc906..50a06d9e7a42 100644
--- a/lib/Basic/CMakeLists.txt
+++ b/lib/Basic/CMakeLists.txt
@@ -1,8 +1,60 @@
set(LLVM_LINK_COMPONENTS
+ Core
MC
Support
)
+# Figure out if we can track VC revisions.
+function(find_first_existing_file out_var)
+ foreach(file ${ARGN})
+ if(EXISTS "${file}")
+ set(${out_var} "${file}" PARENT_SCOPE)
+ return()
+ endif()
+ endforeach()
+endfunction()
+
+macro(find_first_existing_vc_file out_var path)
+ find_first_existing_file(${out_var}
+ "${path}/.git/logs/HEAD" # Git
+ "${path}/.svn/wc.db" # SVN 1.7
+ "${path}/.svn/entries" # SVN 1.6
+ )
+endmacro()
+
+find_first_existing_vc_file(llvm_vc "${LLVM_MAIN_SRC_DIR}")
+find_first_existing_vc_file(clang_vc "${CLANG_SOURCE_DIR}")
+
+# The VC revision include that we want to generate.
+set(version_inc "${CMAKE_CURRENT_BINARY_DIR}/SVNVersion.inc")
+
+set(get_svn_script "${LLVM_MAIN_SRC_DIR}/cmake/modules/GetSVN.cmake")
+
+if(DEFINED llvm_vc AND DEFINED clang_vc)
+ # Create custom target to generate the VC revision include.
+ add_custom_command(OUTPUT "${version_inc}"
+ DEPENDS "${llvm_vc}" "${clang_vc}" "${get_svn_script}"
+ COMMAND
+ ${CMAKE_COMMAND} "-DFIRST_SOURCE_DIR=${LLVM_MAIN_SRC_DIR}"
+ "-DFIRST_NAME=LLVM"
+ "-DSECOND_SOURCE_DIR=${CLANG_SOURCE_DIR}"
+ "-DSECOND_NAME=SVN"
+ "-DHEADER_FILE=${version_inc}"
+ -P "${get_svn_script}")
+
+ # Mark the generated header as being generated.
+ set_source_files_properties("${version_inc}"
+ PROPERTIES GENERATED TRUE
+ HEADER_FILE_ONLY TRUE)
+
+ # Tell Version.cpp that it needs to build with -DHAVE_SVN_VERSION_INC.
+ set_source_files_properties(Version.cpp
+ PROPERTIES COMPILE_DEFINITIONS "HAVE_SVN_VERSION_INC")
+else()
+ # Not producing a VC revision include.
+ set(version_inc)
+endif()
+
add_clang_library(clangBasic
Attributes.cpp
Builtins.cpp
@@ -17,6 +69,8 @@ add_clang_library(clangBasic
ObjCRuntime.cpp
OpenMPKinds.cpp
OperatorPrecedence.cpp
+ SanitizerBlacklist.cpp
+ Sanitizers.cpp
SourceLocation.cpp
SourceManager.cpp
TargetInfo.cpp
@@ -26,30 +80,6 @@ add_clang_library(clangBasic
VersionTuple.cpp
VirtualFileSystem.cpp
Warnings.cpp
+ ${version_inc}
)
-# Determine Subversion revision.
-# FIXME: This only gets updated when CMake is run, so this revision number
-# may be out-of-date!
-if( NOT IS_SYMLINK "${CLANG_SOURCE_DIR}" ) # See PR 8437
- find_package(Subversion)
-endif()
-if (Subversion_FOUND AND EXISTS "${CLANG_SOURCE_DIR}/.svn")
- set(FIRST_SOURCE_DIR ${LLVM_MAIN_SRC_DIR})
- set(FIRST_REPOSITORY LLVM_REPOSITORY)
- set(SECOND_SOURCE_DIR ${CLANG_SOURCE_DIR})
- set(SECOND_REPOSITORY SVN_REPOSITORY)
- set(HEADER_FILE ${CMAKE_CURRENT_BINARY_DIR}/SVNVersion.inc)
- include(GetSVN)
-
- # Mark the generated header as being generated.
- message(STATUS "Expecting header to go in ${CMAKE_CURRENT_BINARY_DIR}/SVNVersion.inc")
- set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/SVNVersion.inc
- PROPERTIES GENERATED TRUE
- HEADER_FILE_ONLY TRUE)
-
- # Tell Version.cpp that it needs to build with -DHAVE_SVN_VERSION_INC.
- set_source_files_properties(Version.cpp
- PROPERTIES COMPILE_DEFINITIONS "HAVE_SVN_VERSION_INC")
-
-endif()
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index 4567e3267131..83228ad3c55d 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -19,6 +19,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/Locale.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -33,13 +34,11 @@ static void DummyArgToStringFn(DiagnosticsEngine::ArgumentKind AK, intptr_t QT,
Output.append(Str.begin(), Str.end());
}
-
DiagnosticsEngine::DiagnosticsEngine(
- const IntrusiveRefCntPtr<DiagnosticIDs> &diags,
- DiagnosticOptions *DiagOpts,
- DiagnosticConsumer *client, bool ShouldOwnClient)
- : Diags(diags), DiagOpts(DiagOpts), Client(client),
- OwnsDiagClient(ShouldOwnClient), SourceMgr(nullptr) {
+ const IntrusiveRefCntPtr<DiagnosticIDs> &diags, DiagnosticOptions *DiagOpts,
+ DiagnosticConsumer *client, bool ShouldOwnClient)
+ : Diags(diags), DiagOpts(DiagOpts), Client(nullptr), SourceMgr(nullptr) {
+ setClient(client, ShouldOwnClient);
ArgToStringFn = DummyArgToStringFn;
ArgToStringCookie = nullptr;
@@ -64,17 +63,15 @@ DiagnosticsEngine::DiagnosticsEngine(
}
DiagnosticsEngine::~DiagnosticsEngine() {
- if (OwnsDiagClient)
- delete Client;
+ // If we own the diagnostic client, destroy it first so that it can access the
+ // engine from its destructor.
+ setClient(nullptr);
}
void DiagnosticsEngine::setClient(DiagnosticConsumer *client,
bool ShouldOwnClient) {
- if (OwnsDiagClient && Client)
- delete Client;
-
+ Owner.reset(ShouldOwnClient ? client : nullptr);
Client = client;
- OwnsDiagClient = ShouldOwnClient;
}
void DiagnosticsEngine::pushMappings(SourceLocation Loc) {
@@ -101,7 +98,6 @@ void DiagnosticsEngine::Reset() {
NumWarnings = 0;
NumErrors = 0;
- NumErrorsSuppressed = 0;
TrapNumErrorsOccurred = 0;
TrapNumUnrecoverableErrorsOccurred = 0;
@@ -232,13 +228,13 @@ bool DiagnosticsEngine::setSeverityForGroup(diag::Flavor Flavor,
StringRef Group, diag::Severity Map,
SourceLocation Loc) {
// Get the diagnostics in this group.
- SmallVector<diag::kind, 8> GroupDiags;
+ SmallVector<diag::kind, 256> GroupDiags;
if (Diags->getDiagnosticsInGroup(Flavor, Group, GroupDiags))
return true;
// Set the mapping.
- for (unsigned i = 0, e = GroupDiags.size(); i != e; ++i)
- setSeverity(GroupDiags[i], Map, Loc);
+ for (diag::kind Diag : GroupDiags)
+ setSeverity(Diag, Map, Loc);
return false;
}
@@ -634,6 +630,20 @@ void Diagnostic::
FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
SmallVectorImpl<char> &OutStr) const {
+ // When the diagnostic string is only "%0", the entire string is being given
+ // by an outside source. Remove unprintable characters from this string
+ // and skip all the other string processing.
+ if (DiagEnd - DiagStr == 2 && DiagStr[0] == '%' && DiagStr[1] == '0' &&
+ getArgKind(0) == DiagnosticsEngine::ak_std_string) {
+ const std::string &S = getArgStdStr(0);
+ for (char c : S) {
+ if (llvm::sys::locale::isPrint(c) || c == '\t') {
+ OutStr.push_back(c);
+ }
+ }
+ return;
+ }
+
/// FormattedArgs - Keep track of all of the arguments formatted by
/// ConvertArgToString and pass them into subsequent calls to
/// ConvertArgToString, allowing the implementation to avoid redundancies in
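The "%0" fast path added above boils down to a character filter. The following standalone sketch shows the same rule using llvm::sys::locale::isPrint; the helper name appendPrintable and its includes are chosen here for illustration and are not part of the patch:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Support/Locale.h"
    #include <string>

    // Keep printable characters and tabs; drop everything else (embedded NULs,
    // carriage returns, stray terminal escape bytes, ...).
    static void appendPrintable(const std::string &S,
                                llvm::SmallVectorImpl<char> &Out) {
      for (char c : S)
        if (llvm::sys::locale::isPrint(c) || c == '\t')
          Out.push_back(c);
    }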
diff --git a/lib/Basic/DiagnosticIDs.cpp b/lib/Basic/DiagnosticIDs.cpp
index ec244ccda3cb..1c68375f3cdd 100644
--- a/lib/Basic/DiagnosticIDs.cpp
+++ b/lib/Basic/DiagnosticIDs.cpp
@@ -428,16 +428,9 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
// Upgrade ignored diagnostics if -Weverything is enabled.
if (Diag.EnableAllWarnings && Result == diag::Severity::Ignored &&
- !Mapping.isUser())
+ !Mapping.isUser() && getBuiltinDiagClass(DiagID) != CLASS_REMARK)
Result = diag::Severity::Warning;
- // Diagnostics of class REMARK are either printed as remarks or in case they
- // have been added to -Werror they are printed as errors.
- // FIXME: Disregarding user-requested remark mappings like this is bogus.
- if (Result == diag::Severity::Warning &&
- getBuiltinDiagClass(DiagID) == CLASS_REMARK)
- Result = diag::Severity::Remark;
-
// Ignore -pedantic diagnostics inside __extension__ blocks.
// (The diagnostics controlled by -pedantic are the extension diagnostics
// that are not enabled by default.)
@@ -613,9 +606,6 @@ StringRef DiagnosticIDs::getNearestOption(diag::Flavor Flavor,
bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
Diagnostic Info(&Diag);
- if (Diag.SuppressAllDiagnostics)
- return false;
-
assert(Diag.getClient() && "DiagnosticClient not set!");
// Figure out the diagnostic level of this message.
@@ -623,6 +613,17 @@ bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
DiagnosticIDs::Level DiagLevel
= getDiagnosticLevel(DiagID, Info.getLocation(), Diag);
+ // Update counts for DiagnosticErrorTrap even if a fatal error occurred
+ // or diagnostics are suppressed.
+ if (DiagLevel >= DiagnosticIDs::Error) {
+ ++Diag.TrapNumErrorsOccurred;
+ if (isUnrecoverable(DiagID))
+ ++Diag.TrapNumUnrecoverableErrorsOccurred;
+ }
+
+ if (Diag.SuppressAllDiagnostics)
+ return false;
+
if (DiagLevel != DiagnosticIDs::Note) {
// Record that a fatal error occurred only when we see a second
// non-note diagnostic. This allows notes to be attached to the
@@ -634,20 +635,12 @@ bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
Diag.LastDiagLevel = DiagLevel;
}
- // Update counts for DiagnosticErrorTrap even if a fatal error occurred.
- if (DiagLevel >= DiagnosticIDs::Error) {
- ++Diag.TrapNumErrorsOccurred;
- if (isUnrecoverable(DiagID))
- ++Diag.TrapNumUnrecoverableErrorsOccurred;
- }
-
// If a fatal error has already been emitted, silence all subsequent
// diagnostics.
if (Diag.FatalErrorOccurred) {
if (DiagLevel >= DiagnosticIDs::Error &&
Diag.Client->IncludeInDiagnosticCounts()) {
++Diag.NumErrors;
- ++Diag.NumErrorsSuppressed;
}
return false;
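Moving the trap counters ahead of the SuppressAllDiagnostics early-return matters for callers that probe for errors while diagnostics are silenced. A hedged usage sketch; the wrapper function checkWithoutEmitting is hypothetical, only DiagnosticErrorTrap itself comes from clang/Basic/Diagnostic.h:

    #include "clang/Basic/Diagnostic.h"

    static bool checkWithoutEmitting(clang::DiagnosticsEngine &Diags) {
      clang::DiagnosticErrorTrap Trap(Diags);
      // ... run code that may emit diagnostics while SuppressAllDiagnostics is
      // set; nothing is printed, but TrapNumErrorsOccurred is still bumped ...
      return !Trap.hasErrorOccurred();
    }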
diff --git a/lib/Basic/FileManager.cpp b/lib/Basic/FileManager.cpp
index 94210320bff7..214e0f35a52e 100644
--- a/lib/Basic/FileManager.cpp
+++ b/lib/Basic/FileManager.cpp
@@ -64,20 +64,20 @@ FileManager::~FileManager() {
delete VirtualDirectoryEntries[i];
}
-void FileManager::addStatCache(FileSystemStatCache *statCache,
+void FileManager::addStatCache(std::unique_ptr<FileSystemStatCache> statCache,
bool AtBeginning) {
assert(statCache && "No stat cache provided?");
if (AtBeginning || !StatCache.get()) {
- statCache->setNextStatCache(StatCache.release());
- StatCache.reset(statCache);
+ statCache->setNextStatCache(std::move(StatCache));
+ StatCache = std::move(statCache);
return;
}
FileSystemStatCache *LastCache = StatCache.get();
while (LastCache->getNextStatCache())
LastCache = LastCache->getNextStatCache();
-
- LastCache->setNextStatCache(statCache);
+
+ LastCache->setNextStatCache(std::move(statCache));
}
void FileManager::removeStatCache(FileSystemStatCache *statCache) {
@@ -86,7 +86,7 @@ void FileManager::removeStatCache(FileSystemStatCache *statCache) {
if (StatCache.get() == statCache) {
// This is the first stat cache.
- StatCache.reset(StatCache->takeNextStatCache());
+ StatCache = StatCache->takeNextStatCache();
return;
}
@@ -96,7 +96,7 @@ void FileManager::removeStatCache(FileSystemStatCache *statCache) {
PrevCache = PrevCache->getNextStatCache();
assert(PrevCache && "Stat cache not found for removal");
- PrevCache->setNextStatCache(statCache->getNextStatCache());
+ PrevCache->setNextStatCache(statCache->takeNextStatCache());
}
void FileManager::clearStatCaches() {
@@ -129,20 +129,20 @@ void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
if (DirName.empty())
return;
- llvm::StringMapEntry<DirectoryEntry *> &NamedDirEnt =
- SeenDirEntries.GetOrCreateValue(DirName);
+ auto &NamedDirEnt =
+ *SeenDirEntries.insert(std::make_pair(DirName, nullptr)).first;
// When caching a virtual directory, we always cache its ancestors
// at the same time. Therefore, if DirName is already in the cache,
// we don't need to recurse as its ancestors must also already be in
// the cache.
- if (NamedDirEnt.getValue())
+ if (NamedDirEnt.second)
return;
// Add the virtual directory to the cache.
DirectoryEntry *UDE = new DirectoryEntry;
- UDE->Name = NamedDirEnt.getKeyData();
- NamedDirEnt.setValue(UDE);
+ UDE->Name = NamedDirEnt.first().data();
+ NamedDirEnt.second = UDE;
VirtualDirectoryEntries.push_back(UDE);
// Recursively add the other ancestors.
@@ -170,23 +170,23 @@ const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
#endif
++NumDirLookups;
- llvm::StringMapEntry<DirectoryEntry *> &NamedDirEnt =
- SeenDirEntries.GetOrCreateValue(DirName);
+ auto &NamedDirEnt =
+ *SeenDirEntries.insert(std::make_pair(DirName, nullptr)).first;
// See if there was already an entry in the map. Note that the map
// contains both virtual and real directories.
- if (NamedDirEnt.getValue())
- return NamedDirEnt.getValue() == NON_EXISTENT_DIR ? nullptr
- : NamedDirEnt.getValue();
+ if (NamedDirEnt.second)
+ return NamedDirEnt.second == NON_EXISTENT_DIR ? nullptr
+ : NamedDirEnt.second;
++NumDirCacheMisses;
// By default, initialize it to invalid.
- NamedDirEnt.setValue(NON_EXISTENT_DIR);
+ NamedDirEnt.second = NON_EXISTENT_DIR;
// Get the null-terminated directory name as stored as the key of the
// SeenDirEntries map.
- const char *InterndDirName = NamedDirEnt.getKeyData();
+ const char *InterndDirName = NamedDirEnt.first().data();
// Check to see if the directory exists.
FileData Data;
@@ -203,7 +203,7 @@ const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
// Windows).
DirectoryEntry &UDE = UniqueRealDirs[Data.UniqueID];
- NamedDirEnt.setValue(&UDE);
+ NamedDirEnt.second = &UDE;
if (!UDE.getName()) {
// We don't have this directory yet, add it. We use the string
// key from the SeenDirEntries map as the string.
@@ -218,22 +218,22 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
++NumFileLookups;
// See if there is already an entry in the map.
- llvm::StringMapEntry<FileEntry *> &NamedFileEnt =
- SeenFileEntries.GetOrCreateValue(Filename);
+ auto &NamedFileEnt =
+ *SeenFileEntries.insert(std::make_pair(Filename, nullptr)).first;
// See if there is already an entry in the map.
- if (NamedFileEnt.getValue())
- return NamedFileEnt.getValue() == NON_EXISTENT_FILE
- ? nullptr : NamedFileEnt.getValue();
+ if (NamedFileEnt.second)
+ return NamedFileEnt.second == NON_EXISTENT_FILE ? nullptr
+ : NamedFileEnt.second;
++NumFileCacheMisses;
// By default, initialize it to invalid.
- NamedFileEnt.setValue(NON_EXISTENT_FILE);
+ NamedFileEnt.second = NON_EXISTENT_FILE;
// Get the null-terminated file name as stored as the key of the
// SeenFileEntries map.
- const char *InterndFileName = NamedFileEnt.getKeyData();
+ const char *InterndFileName = NamedFileEnt.first().data();
// Look up the directory for the file. When looking up something like
// sys/foo.h we'll discover all of the search directories that have a 'sys'
@@ -269,7 +269,21 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
// This occurs when one dir is symlinked to another, for example.
FileEntry &UFE = UniqueRealFiles[Data.UniqueID];
- NamedFileEnt.setValue(&UFE);
+ NamedFileEnt.second = &UFE;
+
+ // If the name returned by getStatValue is different than Filename, re-intern
+ // the name.
+ if (Data.Name != Filename) {
+ auto &NamedFileEnt =
+ *SeenFileEntries.insert(std::make_pair(Data.Name, nullptr)).first;
+ if (!NamedFileEnt.second)
+ NamedFileEnt.second = &UFE;
+ else
+ assert(NamedFileEnt.second == &UFE &&
+ "filename from getStatValue() refers to wrong file");
+ InterndFileName = NamedFileEnt.first().data();
+ }
+
if (UFE.isValid()) { // Already have an entry with this inode, return it.
// FIXME: this hack ensures that if we look up a file by a virtual path in
@@ -281,11 +295,18 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
if (DirInfo != UFE.Dir && Data.IsVFSMapped)
UFE.Dir = DirInfo;
+ // Always update the name to use the last name by which a file was accessed.
+ // FIXME: Neither this nor always using the first name is correct; we want
+ // to switch towards a design where we return a FileName object that
+ // encapsulates both the name by which the file was accessed and the
+ // corresponding FileEntry.
+ UFE.Name = InterndFileName;
+
return &UFE;
}
// Otherwise, we don't have this file yet, add it.
- UFE.Name = Data.Name;
+ UFE.Name = InterndFileName;
UFE.Size = Data.Size;
UFE.ModTime = Data.ModTime;
UFE.Dir = DirInfo;
@@ -304,17 +325,17 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
++NumFileLookups;
// See if there is already an entry in the map.
- llvm::StringMapEntry<FileEntry *> &NamedFileEnt =
- SeenFileEntries.GetOrCreateValue(Filename);
+ auto &NamedFileEnt =
+ *SeenFileEntries.insert(std::make_pair(Filename, nullptr)).first;
// See if there is already an entry in the map.
- if (NamedFileEnt.getValue() && NamedFileEnt.getValue() != NON_EXISTENT_FILE)
- return NamedFileEnt.getValue();
+ if (NamedFileEnt.second && NamedFileEnt.second != NON_EXISTENT_FILE)
+ return NamedFileEnt.second;
++NumFileCacheMisses;
// By default, initialize it to invalid.
- NamedFileEnt.setValue(NON_EXISTENT_FILE);
+ NamedFileEnt.second = NON_EXISTENT_FILE;
addAncestorsAsVirtualDirs(Filename);
FileEntry *UFE = nullptr;
@@ -329,13 +350,13 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
// Check to see if the file exists. If so, drop the virtual file
FileData Data;
- const char *InterndFileName = NamedFileEnt.getKeyData();
+ const char *InterndFileName = NamedFileEnt.first().data();
if (getStatValue(InterndFileName, Data, true, nullptr) == 0) {
Data.Size = Size;
Data.ModTime = ModificationTime;
UFE = &UniqueRealFiles[Data.UniqueID];
- NamedFileEnt.setValue(UFE);
+ NamedFileEnt.second = UFE;
// If we had already opened this file, close it now so we don't
// leak the descriptor. We're not going to use the file
@@ -355,7 +376,7 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
if (!UFE) {
UFE = new FileEntry();
VirtualFileEntries.push_back(UFE);
- NamedFileEnt.setValue(UFE);
+ NamedFileEnt.second = UFE;
}
UFE->Name = InterndFileName;
@@ -379,12 +400,9 @@ void FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
path = NewPath;
}
-llvm::MemoryBuffer *FileManager::
-getBufferForFile(const FileEntry *Entry, std::string *ErrorStr,
- bool isVolatile, bool ShouldCloseOpenFile) {
- std::unique_ptr<llvm::MemoryBuffer> Result;
- std::error_code ec;
-
+llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile,
+ bool ShouldCloseOpenFile) {
uint64_t FileSize = Entry->getSize();
// If there's a high enough chance that the file has changed since we
// got its size, force a stat before opening it.
@@ -394,53 +412,36 @@ getBufferForFile(const FileEntry *Entry, std::string *ErrorStr,
const char *Filename = Entry->getName();
// If the file is already open, use the open file descriptor.
if (Entry->File) {
- ec = Entry->File->getBuffer(Filename, Result, FileSize,
- /*RequiresNullTerminator=*/true, isVolatile);
- if (ErrorStr)
- *ErrorStr = ec.message();
+ auto Result =
+ Entry->File->getBuffer(Filename, FileSize,
+ /*RequiresNullTerminator=*/true, isVolatile);
// FIXME: we need a set of APIs that can make guarantees about whether a
// FileEntry is open or not.
if (ShouldCloseOpenFile)
Entry->closeFile();
- return Result.release();
+ return Result;
}
// Otherwise, open the file.
- if (FileSystemOpts.WorkingDir.empty()) {
- ec = FS->getBufferForFile(Filename, Result, FileSize,
- /*RequiresNullTerminator=*/true, isVolatile);
- if (ec && ErrorStr)
- *ErrorStr = ec.message();
- return Result.release();
- }
+ if (FileSystemOpts.WorkingDir.empty())
+ return FS->getBufferForFile(Filename, FileSize,
+ /*RequiresNullTerminator=*/true, isVolatile);
SmallString<128> FilePath(Entry->getName());
FixupRelativePath(FilePath);
- ec = FS->getBufferForFile(FilePath.str(), Result, FileSize,
- /*RequiresNullTerminator=*/true, isVolatile);
- if (ec && ErrorStr)
- *ErrorStr = ec.message();
- return Result.release();
+ return FS->getBufferForFile(FilePath.str(), FileSize,
+ /*RequiresNullTerminator=*/true, isVolatile);
}
-llvm::MemoryBuffer *FileManager::
-getBufferForFile(StringRef Filename, std::string *ErrorStr) {
- std::unique_ptr<llvm::MemoryBuffer> Result;
- std::error_code ec;
- if (FileSystemOpts.WorkingDir.empty()) {
- ec = FS->getBufferForFile(Filename, Result);
- if (ec && ErrorStr)
- *ErrorStr = ec.message();
- return Result.release();
- }
+llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+FileManager::getBufferForFile(StringRef Filename) {
+ if (FileSystemOpts.WorkingDir.empty())
+ return FS->getBufferForFile(Filename);
SmallString<128> FilePath(Filename);
FixupRelativePath(FilePath);
- ec = FS->getBufferForFile(FilePath.c_str(), Result);
- if (ec && ErrorStr)
- *ErrorStr = ec.message();
- return Result.release();
+ return FS->getBufferForFile(FilePath.c_str());
}
/// getStatValue - Get the 'stat' information for the specified path,
@@ -512,15 +513,47 @@ void FileManager::modifyFileEntry(FileEntry *File,
File->ModTime = ModificationTime;
}
+/// Remove '.' path components from the given absolute path.
+/// \return \c true if any changes were made.
+// FIXME: Move this to llvm::sys::path.
+bool FileManager::removeDotPaths(SmallVectorImpl<char> &Path) {
+ using namespace llvm::sys;
+
+ SmallVector<StringRef, 16> ComponentStack;
+ StringRef P(Path.data(), Path.size());
+
+ // Skip the root path, then look for traversal in the components.
+ StringRef Rel = path::relative_path(P);
+ bool AnyDots = false;
+ for (StringRef C : llvm::make_range(path::begin(Rel), path::end(Rel))) {
+ if (C == ".") {
+ AnyDots = true;
+ continue;
+ }
+ ComponentStack.push_back(C);
+ }
+
+ if (!AnyDots)
+ return false;
+
+ SmallString<256> Buffer = path::root_path(P);
+ for (StringRef C : ComponentStack)
+ path::append(Buffer, C);
+
+ Path.swap(Buffer);
+ return true;
+}
+
StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
// FIXME: use llvm::sys::fs::canonical() when it gets implemented
-#ifdef LLVM_ON_UNIX
llvm::DenseMap<const DirectoryEntry *, llvm::StringRef>::iterator Known
= CanonicalDirNames.find(Dir);
if (Known != CanonicalDirNames.end())
return Known->second;
StringRef CanonicalName(Dir->getName());
+
+#ifdef LLVM_ON_UNIX
char CanonicalNameBuf[PATH_MAX];
if (realpath(Dir->getName(), CanonicalNameBuf)) {
unsigned Len = strlen(CanonicalNameBuf);
@@ -528,12 +561,15 @@ StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
memcpy(Mem, CanonicalNameBuf, Len);
CanonicalName = StringRef(Mem, Len);
}
+#else
+ SmallString<256> CanonicalNameBuf(CanonicalName);
+ llvm::sys::fs::make_absolute(CanonicalNameBuf);
+ llvm::sys::path::native(CanonicalNameBuf);
+ removeDotPaths(CanonicalNameBuf);
+#endif
CanonicalDirNames.insert(std::make_pair(Dir, CanonicalName));
return CanonicalName;
-#else
- return StringRef(Dir->getName());
-#endif
}
void FileManager::PrintStats() const {
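With the signature change above, callers move from an out-parameter error string to llvm::ErrorOr. A minimal caller-side sketch under stated assumptions: dumpFile is an invented helper, and FileMgr/Entry stand for an existing FileManager and FileEntry:

    #include "clang/Basic/FileManager.h"
    #include "llvm/Support/raw_ostream.h"

    static void dumpFile(clang::FileManager &FileMgr,
                         const clang::FileEntry *Entry) {
      llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> BufOrErr =
          FileMgr.getBufferForFile(Entry, /*isVolatile=*/false);
      if (!BufOrErr) {
        // The error code travels inside the ErrorOr instead of an ErrorStr.
        llvm::errs() << "cannot open " << Entry->getName() << ": "
                     << BufOrErr.getError().message() << "\n";
        return;
      }
      llvm::outs() << (*BufOrErr)->getBuffer();
    }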
diff --git a/lib/Basic/FileSystemStatCache.cpp b/lib/Basic/FileSystemStatCache.cpp
index 7515cfb440af..83e42bdb8483 100644
--- a/lib/Basic/FileSystemStatCache.cpp
+++ b/lib/Basic/FileSystemStatCache.cpp
@@ -78,21 +78,20 @@ bool FileSystemStatCache::get(const char *Path, FileData &Data, bool isFile,
//
// Because of this, check to see if the file exists with 'open'. If the
// open succeeds, use fstat to get the stat info.
- std::unique_ptr<vfs::File> OwnedFile;
- std::error_code EC = FS.openFileForRead(Path, OwnedFile);
+ auto OwnedFile = FS.openFileForRead(Path);
- if (EC) {
+ if (!OwnedFile) {
// If the open fails, our "stat" fails.
R = CacheMissing;
} else {
// Otherwise, the open succeeded. Do an fstat to get the information
// about the file. We'll end up returning the open file descriptor to the
// client to do what they please with it.
- llvm::ErrorOr<vfs::Status> Status = OwnedFile->status();
+ llvm::ErrorOr<vfs::Status> Status = (*OwnedFile)->status();
if (Status) {
R = CacheExists;
copyStatusToFileData(*Status, Data);
- *F = std::move(OwnedFile);
+ *F = std::move(*OwnedFile);
} else {
// fstat rarely fails. If it does, claim the initial open didn't
// succeed.
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index 219845996323..613b43fce95f 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -110,49 +110,59 @@ namespace {
HALFSUPPORT = 0x04000,
KEYALL = (0xffff & ~KEYNOMS) // Because KEYNOMS is used to exclude.
};
+
+ /// \brief How a keyword is treated in the selected standard.
+ enum KeywordStatus {
+ KS_Disabled, // Disabled
+ KS_Extension, // Is an extension
+ KS_Enabled, // Enabled
+ KS_Future // Is a keyword in future standard
+ };
+}
+
+/// \brief Translates flags as specified in TokenKinds.def into keyword status
+/// in the given language standard.
+static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
+ unsigned Flags) {
+ if (Flags == KEYALL) return KS_Enabled;
+ if (LangOpts.CPlusPlus && (Flags & KEYCXX)) return KS_Enabled;
+ if (LangOpts.CPlusPlus11 && (Flags & KEYCXX11)) return KS_Enabled;
+ if (LangOpts.C99 && (Flags & KEYC99)) return KS_Enabled;
+ if (LangOpts.GNUKeywords && (Flags & KEYGNU)) return KS_Extension;
+ if (LangOpts.MicrosoftExt && (Flags & KEYMS)) return KS_Extension;
+ if (LangOpts.Borland && (Flags & KEYBORLAND)) return KS_Extension;
+ if (LangOpts.Bool && (Flags & BOOLSUPPORT)) return KS_Enabled;
+ if (LangOpts.Half && (Flags & HALFSUPPORT)) return KS_Enabled;
+ if (LangOpts.WChar && (Flags & WCHARSUPPORT)) return KS_Enabled;
+ if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) return KS_Enabled;
+ if (LangOpts.OpenCL && (Flags & KEYOPENCL)) return KS_Enabled;
+ if (!LangOpts.CPlusPlus && (Flags & KEYNOCXX)) return KS_Enabled;
+ if (LangOpts.C11 && (Flags & KEYC11)) return KS_Enabled;
+ // We treat bridge casts as objective-C keywords so we can warn on them
+ // in non-arc mode.
+ if (LangOpts.ObjC2 && (Flags & KEYARC)) return KS_Enabled;
+ if (LangOpts.CPlusPlus && (Flags & KEYCXX11)) return KS_Future;
+ return KS_Disabled;
}
/// AddKeyword - This method is used to associate a token ID with specific
/// identifiers because they are language keywords. This causes the lexer to
/// automatically map matching identifiers to specialized token codes.
-///
-/// The C90/C99/CPP/CPP0x flags are set to 3 if the token is a keyword in a
-/// future language standard, set to 2 if the token should be enabled in the
-/// specified language, set to 1 if it is an extension in the specified
-/// language, and set to 0 if disabled in the specified language.
static void AddKeyword(StringRef Keyword,
tok::TokenKind TokenCode, unsigned Flags,
const LangOptions &LangOpts, IdentifierTable &Table) {
- unsigned AddResult = 0;
- if (Flags == KEYALL) AddResult = 2;
- else if (LangOpts.CPlusPlus && (Flags & KEYCXX)) AddResult = 2;
- else if (LangOpts.CPlusPlus11 && (Flags & KEYCXX11)) AddResult = 2;
- else if (LangOpts.C99 && (Flags & KEYC99)) AddResult = 2;
- else if (LangOpts.GNUKeywords && (Flags & KEYGNU)) AddResult = 1;
- else if (LangOpts.MicrosoftExt && (Flags & KEYMS)) AddResult = 1;
- else if (LangOpts.Borland && (Flags & KEYBORLAND)) AddResult = 1;
- else if (LangOpts.Bool && (Flags & BOOLSUPPORT)) AddResult = 2;
- else if (LangOpts.Half && (Flags & HALFSUPPORT)) AddResult = 2;
- else if (LangOpts.WChar && (Flags & WCHARSUPPORT)) AddResult = 2;
- else if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) AddResult = 2;
- else if (LangOpts.OpenCL && (Flags & KEYOPENCL)) AddResult = 2;
- else if (!LangOpts.CPlusPlus && (Flags & KEYNOCXX)) AddResult = 2;
- else if (LangOpts.C11 && (Flags & KEYC11)) AddResult = 2;
- // We treat bridge casts as objective-C keywords so we can warn on them
- // in non-arc mode.
- else if (LangOpts.ObjC2 && (Flags & KEYARC)) AddResult = 2;
- else if (LangOpts.CPlusPlus && (Flags & KEYCXX11)) AddResult = 3;
+ KeywordStatus AddResult = getKeywordStatus(LangOpts, Flags);
// Don't add this keyword under MSVCCompat.
if (LangOpts.MSVCCompat && (Flags & KEYNOMS))
return;
// Don't add this keyword if disabled in this language.
- if (AddResult == 0) return;
+ if (AddResult == KS_Disabled) return;
IdentifierInfo &Info =
- Table.get(Keyword, AddResult == 3 ? tok::identifier : TokenCode);
- Info.setIsExtensionToken(AddResult == 1);
- Info.setIsCXX11CompatKeyword(AddResult == 3);
+ Table.get(Keyword, AddResult == KS_Future ? tok::identifier : TokenCode);
+ Info.setIsExtensionToken(AddResult == KS_Extension);
+ Info.setIsCXX11CompatKeyword(AddResult == KS_Future);
}
/// AddCXXOperatorKeyword - Register a C++ operator keyword alternative
@@ -199,6 +209,31 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
LangOpts, *this);
}
+/// \brief Checks if the specified token kind represents a keyword in the
+/// specified language.
+/// \returns Status of the keyword in the language.
+static KeywordStatus getTokenKwStatus(const LangOptions &LangOpts,
+ tok::TokenKind K) {
+ switch (K) {
+#define KEYWORD(NAME, FLAGS) \
+ case tok::kw_##NAME: return getKeywordStatus(LangOpts, FLAGS);
+#include "clang/Basic/TokenKinds.def"
+ default: return KS_Disabled;
+ }
+}
+
+/// \brief Returns true if the identifier represents a keyword in the
+/// specified language.
+bool IdentifierInfo::isKeyword(const LangOptions &LangOpts) {
+ switch (getTokenKwStatus(LangOpts, getTokenID())) {
+ case KS_Enabled:
+ case KS_Extension:
+ return true;
+ default:
+ return false;
+ }
+}
+
tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
// We use a perfect hash function here involving the length of the keyword,
// the first and third character. For preprocessor ID's there are no
@@ -428,6 +463,7 @@ ObjCMethodFamily Selector::getMethodFamilyImpl(Selector sel) {
if (name == "retain") return OMF_retain;
if (name == "retainCount") return OMF_retainCount;
if (name == "self") return OMF_self;
+ if (name == "initialize") return OMF_initialize;
}
if (name == "performSelector") return OMF_performSelector;
@@ -486,6 +522,33 @@ ObjCInstanceTypeFamily Selector::getInstTypeMethodFamily(Selector sel) {
return OIT_None;
}
+ObjCStringFormatFamily Selector::getStringFormatFamilyImpl(Selector sel) {
+ IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
+ if (!first) return SFF_None;
+
+ StringRef name = first->getName();
+
+ switch (name.front()) {
+ case 'a':
+ if (name == "appendFormat") return SFF_NSString;
+ break;
+
+ case 'i':
+ if (name == "initWithFormat") return SFF_NSString;
+ break;
+
+ case 'l':
+ if (name == "localizedStringWithFormat") return SFF_NSString;
+ break;
+
+ case 's':
+ if (name == "stringByAppendingFormat" ||
+ name == "stringWithFormat") return SFF_NSString;
+ break;
+ }
+ return SFF_None;
+}
+
namespace {
struct SelectorTableImpl {
llvm::FoldingSet<MultiKeywordSelector> Table;
diff --git a/lib/Basic/LangOptions.cpp b/lib/Basic/LangOptions.cpp
index f8714b2389cb..dcbd22817111 100644
--- a/lib/Basic/LangOptions.cpp
+++ b/lib/Basic/LangOptions.cpp
@@ -14,14 +14,10 @@
using namespace clang;
-const SanitizerOptions SanitizerOptions::Disabled = {};
-
LangOptions::LangOptions() {
#define LANGOPT(Name, Bits, Default, Description) Name = Default;
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) set##Name(Default);
#include "clang/Basic/LangOptions.def"
-
- Sanitize = SanitizerOptions::Disabled;
}
void LangOptions::resetNonModularOptions() {
@@ -33,8 +29,10 @@ void LangOptions::resetNonModularOptions() {
// FIXME: This should not be reset; modules can be different with different
// sanitizer options (this affects __has_feature(address_sanitizer) etc).
- Sanitize = SanitizerOptions::Disabled;
+ Sanitize.clear();
+ SanitizerBlacklistFile.clear();
CurrentModule.clear();
+ ImplementationOfModule.clear();
}
diff --git a/lib/Basic/Module.cpp b/lib/Basic/Module.cpp
index f689c733d9c0..03f9bd3f329c 100644
--- a/lib/Basic/Module.cpp
+++ b/lib/Basic/Module.cpp
@@ -25,8 +25,8 @@
using namespace clang;
Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
- const FileEntry *File, bool IsFramework, bool IsExplicit)
- : Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent), ModuleMap(File),
+ bool IsFramework, bool IsExplicit)
+ : Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent), Directory(),
Umbrella(), ASTFile(nullptr), IsMissingRequirement(false),
IsAvailable(true), IsFromModuleFile(false), IsFramework(IsFramework),
IsExplicit(IsExplicit), IsSystem(false), IsExternC(false),
@@ -70,9 +70,9 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
.Default(Target.hasFeature(Feature));
}
-bool
-Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
- Requirement &Req, HeaderDirective &MissingHeader) const {
+bool Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
+ Requirement &Req,
+ UnresolvedHeaderDirective &MissingHeader) const {
if (IsAvailable)
return true;
@@ -293,9 +293,12 @@ void Module::print(raw_ostream &OS, unsigned Indent) const {
OS << "explicit ";
OS << "module " << Name;
- if (IsSystem) {
+ if (IsSystem || IsExternC) {
OS.indent(Indent + 2);
- OS << " [system]";
+ if (IsSystem)
+ OS << " [system]";
+ if (IsExternC)
+ OS << " [extern_c]";
}
OS << " {\n";
@@ -338,30 +341,31 @@ void Module::print(raw_ostream &OS, unsigned Indent) const {
OS << "\n";
}
- for (unsigned I = 0, N = NormalHeaders.size(); I != N; ++I) {
- OS.indent(Indent + 2);
- OS << "header \"";
- OS.write_escaped(NormalHeaders[I]->getName());
- OS << "\"\n";
- }
-
- for (unsigned I = 0, N = ExcludedHeaders.size(); I != N; ++I) {
- OS.indent(Indent + 2);
- OS << "exclude header \"";
- OS.write_escaped(ExcludedHeaders[I]->getName());
- OS << "\"\n";
+ struct {
+ StringRef Prefix;
+ HeaderKind Kind;
+ } Kinds[] = {{"", HK_Normal},
+ {"textual ", HK_Textual},
+ {"private ", HK_Private},
+ {"private textual ", HK_PrivateTextual},
+ {"exclude ", HK_Excluded}};
+
+ for (auto &K : Kinds) {
+ for (auto &H : Headers[K.Kind]) {
+ OS.indent(Indent + 2);
+ OS << K.Prefix << "header \"";
+ OS.write_escaped(H.NameAsWritten);
+ OS << "\"\n";
+ }
}
- for (unsigned I = 0, N = PrivateHeaders.size(); I != N; ++I) {
- OS.indent(Indent + 2);
- OS << "private header \"";
- OS.write_escaped(PrivateHeaders[I]->getName());
- OS << "\"\n";
- }
-
for (submodule_const_iterator MI = submodule_begin(), MIEnd = submodule_end();
MI != MIEnd; ++MI)
- if (!(*MI)->IsInferred)
+ // Print inferred subframework modules so that we don't need to re-infer
+ // them (requires expensive directory iteration + stat calls) when we build
+ // the module. Regular inferred submodules are OK, as we need to look at all
+ // those header files anyway.
+ if (!(*MI)->IsInferred || (*MI)->IsFramework)
(*MI)->print(OS, Indent + 2);
for (unsigned I = 0, N = Exports.size(); I != N; ++I) {
diff --git a/lib/Basic/OpenMPKinds.cpp b/lib/Basic/OpenMPKinds.cpp
index 06f010f7ceb4..6e98d48c27d3 100644
--- a/lib/Basic/OpenMPKinds.cpp
+++ b/lib/Basic/OpenMPKinds.cpp
@@ -46,6 +46,10 @@ const char *clang::getOpenMPDirectiveName(OpenMPDirectiveKind Kind) {
}
OpenMPClauseKind clang::getOpenMPClauseKind(StringRef Str) {
+ // The 'flush' clause cannot be specified explicitly, because it is an implicit
+ // clause of the 'flush' directive. If the 'flush' clause is explicitly specified,
+ // the Parser should generate a warning about extra tokens at the end of the
+ // directive.
if (Str == "flush")
return OMPC_unknown;
return llvm::StringSwitch<OpenMPClauseKind>(Str)
@@ -108,6 +112,11 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_untied:
case OMPC_mergeable:
case OMPC_flush:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
break;
}
llvm_unreachable("Invalid OpenMP simple clause kind");
@@ -167,6 +176,11 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_untied:
case OMPC_mergeable:
case OMPC_flush:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
break;
}
llvm_unreachable("Invalid OpenMP simple clause kind");
@@ -207,6 +221,16 @@ bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
break;
}
break;
+ case OMPD_for_simd:
+ switch (CKind) {
+#define OPENMP_FOR_SIMD_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
case OMPD_sections:
switch (CKind) {
#define OPENMP_SECTIONS_CLAUSE(Name) \
@@ -237,6 +261,16 @@ bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
break;
}
break;
+ case OMPD_parallel_for_simd:
+ switch (CKind) {
+#define OPENMP_PARALLEL_FOR_SIMD_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
case OMPD_parallel_sections:
switch (CKind) {
#define OPENMP_PARALLEL_SECTIONS_CLAUSE(Name) \
@@ -260,6 +294,36 @@ bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
case OMPD_flush:
return CKind == OMPC_flush;
break;
+ case OMPD_atomic:
+ switch (CKind) {
+#define OPENMP_ATOMIC_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_target:
+ switch (CKind) {
+#define OPENMP_TARGET_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_teams:
+ switch (CKind) {
+#define OPENMP_TEAMS_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
case OMPD_unknown:
case OMPD_threadprivate:
case OMPD_section:
@@ -268,29 +332,39 @@ bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
+ case OMPD_ordered:
break;
}
return false;
}
bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
- return DKind == OMPD_simd || DKind == OMPD_for ||
- DKind == OMPD_parallel_for; // TODO add next directives.
+ return DKind == OMPD_simd || DKind == OMPD_for || DKind == OMPD_for_simd ||
+ DKind == OMPD_parallel_for ||
+ DKind == OMPD_parallel_for_simd; // TODO add next directives.
}
bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
- return DKind == OMPD_for || DKind == OMPD_sections || DKind == OMPD_section ||
+ return DKind == OMPD_for || DKind == OMPD_for_simd ||
+ DKind == OMPD_sections || DKind == OMPD_section ||
DKind == OMPD_single || DKind == OMPD_parallel_for ||
+ DKind == OMPD_parallel_for_simd ||
DKind == OMPD_parallel_sections; // TODO add next directives.
}
bool clang::isOpenMPParallelDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_parallel || DKind == OMPD_parallel_for ||
+ DKind == OMPD_parallel_for_simd ||
DKind == OMPD_parallel_sections; // TODO add next directives.
}
+bool clang::isOpenMPTeamsDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_teams; // TODO add next directives.
+}
+
bool clang::isOpenMPSimdDirective(OpenMPDirectiveKind DKind) {
- return DKind == OMPD_simd; // TODO || DKind == OMPD_for_simd || ...
+ return DKind == OMPD_simd || DKind == OMPD_for_simd ||
+ DKind == OMPD_parallel_for_simd; // TODO add next directives.
}
bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) {
diff --git a/lib/Basic/SanitizerBlacklist.cpp b/lib/Basic/SanitizerBlacklist.cpp
new file mode 100644
index 000000000000..ea5b8d0da845
--- /dev/null
+++ b/lib/Basic/SanitizerBlacklist.cpp
@@ -0,0 +1,46 @@
+//===--- SanitizerBlacklist.cpp - Blacklist for sanitizers ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// User-provided blacklist used to disable/alter instrumentation done in
+// sanitizers.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/SanitizerBlacklist.h"
+
+using namespace clang;
+
+SanitizerBlacklist::SanitizerBlacklist(StringRef BlacklistPath,
+ SourceManager &SM)
+ : SCL(llvm::SpecialCaseList::createOrDie(BlacklistPath)), SM(SM) {}
+
+bool SanitizerBlacklist::isBlacklistedGlobal(StringRef GlobalName,
+ StringRef Category) const {
+ return SCL->inSection("global", GlobalName, Category);
+}
+
+bool SanitizerBlacklist::isBlacklistedType(StringRef MangledTypeName,
+ StringRef Category) const {
+ return SCL->inSection("type", MangledTypeName, Category);
+}
+
+bool SanitizerBlacklist::isBlacklistedFunction(StringRef FunctionName) const {
+ return SCL->inSection("fun", FunctionName);
+}
+
+bool SanitizerBlacklist::isBlacklistedFile(StringRef FileName,
+ StringRef Category) const {
+ return SCL->inSection("src", FileName, Category);
+}
+
+bool SanitizerBlacklist::isBlacklistedLocation(SourceLocation Loc,
+ StringRef Category) const {
+ return !Loc.isInvalid() &&
+ isBlacklistedFile(SM.getFilename(SM.getFileLoc(Loc)), Category);
+}
+
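The queries above map directly onto llvm::SpecialCaseList sections ("fun", "src", "global", "type"). A hedged usage sketch; the file path, the entries, and the skipInstrumentation helper are illustrative only:

    #include "clang/Basic/SanitizerBlacklist.h"
    #include "llvm/ADT/StringRef.h"

    // blacklist.txt, in SpecialCaseList syntax (illustrative entries):
    //   fun:*NoSanitize*
    //   src:third_party/*
    //   global:known_racy_global=init
    static bool skipInstrumentation(clang::SourceManager &SM,
                                    llvm::StringRef FunctionName) {
      // Note: createOrDie aborts if the path does not exist.
      clang::SanitizerBlacklist BL("blacklist.txt", SM);
      return BL.isBlacklistedFunction(FunctionName); // matches "fun:" entries
    }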
diff --git a/lib/Basic/Sanitizers.cpp b/lib/Basic/Sanitizers.cpp
new file mode 100644
index 000000000000..e9aaa366e32b
--- /dev/null
+++ b/lib/Basic/Sanitizers.cpp
@@ -0,0 +1,35 @@
+//===--- Sanitizers.cpp - C Language Family Language Options ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes from Sanitizers.h
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/Sanitizers.h"
+
+using namespace clang;
+
+SanitizerSet::SanitizerSet() : Kinds(0) {}
+
+bool SanitizerSet::has(SanitizerKind K) const {
+ unsigned Bit = static_cast<unsigned>(K);
+ return Kinds & (1 << Bit);
+}
+
+void SanitizerSet::set(SanitizerKind K, bool Value) {
+ unsigned Bit = static_cast<unsigned>(K);
+ Kinds = Value ? (Kinds | (1 << Bit)) : (Kinds & ~(1 << Bit));
+}
+
+void SanitizerSet::clear() {
+ Kinds = 0;
+}
+
+bool SanitizerSet::empty() const {
+ return Kinds == 0;
+}
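SanitizerSet is a plain bitmask keyed by SanitizerKind. A minimal sketch of the API; the enum value comes from clang/Basic/Sanitizers.h, the wrapper function is invented for illustration:

    #include "clang/Basic/Sanitizers.h"

    static bool addressSanitizerEnabled(clang::SanitizerSet &S) {
      S.set(clang::SanitizerKind::Address, true);  // flip one bit on
      bool Enabled = S.has(clang::SanitizerKind::Address);
      S.clear();                                   // back to empty()
      return Enabled;
    }

This is the form used later in the diff, e.g. Opts.Sanitize.has(SanitizerKind::Address) in Targets.cpp replacing the old Opts.Sanitize.Address field.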
diff --git a/lib/Basic/SourceLocation.cpp b/lib/Basic/SourceLocation.cpp
index 0c06a48c123a..6b885a7bc806 100644
--- a/lib/Basic/SourceLocation.cpp
+++ b/lib/Basic/SourceLocation.cpp
@@ -132,13 +132,9 @@ const char *FullSourceLoc::getCharacterData(bool *Invalid) const {
return SrcMgr->getCharacterData(*this, Invalid);
}
-const llvm::MemoryBuffer* FullSourceLoc::getBuffer(bool *Invalid) const {
- assert(isValid());
- return SrcMgr->getBuffer(SrcMgr->getFileID(*this), Invalid);
-}
-
StringRef FullSourceLoc::getBufferData(bool *Invalid) const {
- return getBuffer(Invalid)->getBuffer();
+ assert(isValid());
+ return SrcMgr->getBuffer(SrcMgr->getFileID(*this), Invalid)->getBuffer();
}
std::pair<FileID, unsigned> FullSourceLoc::getDecomposedLoc() const {
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index 61dfe35e2257..305dcd43960c 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -94,11 +94,9 @@ llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
return Buffer.getPointer();
}
- std::string ErrorStr;
bool isVolatile = SM.userFilesAreVolatile() && !IsSystemFile;
- Buffer.setPointer(SM.getFileManager().getBufferForFile(ContentsEntry,
- &ErrorStr,
- isVolatile));
+ auto BufferOrError =
+ SM.getFileManager().getBufferForFile(ContentsEntry, isVolatile);
// If we were unable to open the file, then we are in an inconsistent
// situation where the content cache referenced a file which no longer
@@ -110,27 +108,30 @@ llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
// currently handle returning a null entry here. Ideally we should detect
// that we are in an inconsistent situation and error out as quickly as
// possible.
- if (!Buffer.getPointer()) {
- const StringRef FillStr("<<<MISSING SOURCE FILE>>>\n");
- Buffer.setPointer(MemoryBuffer::getNewMemBuffer(ContentsEntry->getSize(),
- "<invalid>"));
+ if (!BufferOrError) {
+ StringRef FillStr("<<<MISSING SOURCE FILE>>>\n");
+ Buffer.setPointer(MemoryBuffer::getNewMemBuffer(ContentsEntry->getSize(),
+ "<invalid>").release());
char *Ptr = const_cast<char*>(Buffer.getPointer()->getBufferStart());
for (unsigned i = 0, e = ContentsEntry->getSize(); i != e; ++i)
Ptr[i] = FillStr[i % FillStr.size()];
if (Diag.isDiagnosticInFlight())
- Diag.SetDelayedDiagnostic(diag::err_cannot_open_file,
- ContentsEntry->getName(), ErrorStr);
- else
+ Diag.SetDelayedDiagnostic(diag::err_cannot_open_file,
+ ContentsEntry->getName(),
+ BufferOrError.getError().message());
+ else
Diag.Report(Loc, diag::err_cannot_open_file)
- << ContentsEntry->getName() << ErrorStr;
+ << ContentsEntry->getName() << BufferOrError.getError().message();
Buffer.setInt(Buffer.getInt() | InvalidFlag);
if (Invalid) *Invalid = true;
return Buffer.getPointer();
}
-
+
+ Buffer.setPointer(BufferOrError->release());
+
// Check that the file's size is the same as in the file entry (which may
// have come from a stat cache).
if (getRawBuffer()->getBufferSize() != (size_t)ContentsEntry->getSize()) {
@@ -176,17 +177,11 @@ llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
}
unsigned LineTableInfo::getLineTableFilenameID(StringRef Name) {
- // Look up the filename in the string table, returning the pre-existing value
- // if it exists.
- llvm::StringMapEntry<unsigned> &Entry =
- FilenameIDs.GetOrCreateValue(Name, ~0U);
- if (Entry.getValue() != ~0U)
- return Entry.getValue();
-
- // Otherwise, assign this the next available ID.
- Entry.setValue(FilenamesByID.size());
- FilenamesByID.push_back(&Entry);
- return FilenamesByID.size()-1;
+ auto IterBool =
+ FilenameIDs.insert(std::make_pair(Name, FilenamesByID.size()));
+ if (IterBool.second)
+ FilenamesByID.push_back(&*IterBool.first);
+ return IterBool.first->second;
}
/// AddLineNote - Add a line note to the line table that indicates that there
@@ -373,8 +368,7 @@ SourceManager::SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr,
: Diag(Diag), FileMgr(FileMgr), OverridenFilesKeepOriginalName(true),
UserFilesAreVolatile(UserFilesAreVolatile),
ExternalSLocEntries(nullptr), LineTable(nullptr), NumLinearScans(0),
- NumBinaryProbes(0), FakeBufferForRecovery(nullptr),
- FakeContentCacheForRecovery(nullptr) {
+ NumBinaryProbes(0) {
clearIDTables();
Diag.setSourceManager(this);
}
@@ -398,9 +392,6 @@ SourceManager::~SourceManager() {
ContentCacheAlloc.Deallocate(I->second);
}
}
-
- delete FakeBufferForRecovery;
- delete FakeContentCacheForRecovery;
llvm::DeleteContainerSeconds(MacroArgsCacheMap);
}
@@ -460,13 +451,13 @@ SourceManager::getOrCreateContentCache(const FileEntry *FileEnt,
/// createMemBufferContentCache - Create a new ContentCache for the specified
/// memory buffer. This does no caching.
-const ContentCache *
-SourceManager::createMemBufferContentCache(llvm::MemoryBuffer *Buffer) {
+const ContentCache *SourceManager::createMemBufferContentCache(
+ std::unique_ptr<llvm::MemoryBuffer> Buffer) {
// Add a new ContentCache to the MemBufferInfos list and return it.
ContentCache *Entry = ContentCacheAlloc.Allocate<ContentCache>();
new (Entry) ContentCache();
MemBufferInfos.push_back(Entry);
- Entry->setBuffer(Buffer);
+ Entry->setBuffer(std::move(Buffer));
return Entry;
}
@@ -505,10 +496,10 @@ SourceManager::AllocateLoadedSLocEntries(unsigned NumSLocEntries,
/// fake, non-empty buffer.
llvm::MemoryBuffer *SourceManager::getFakeBufferForRecovery() const {
if (!FakeBufferForRecovery)
- FakeBufferForRecovery
- = llvm::MemoryBuffer::getMemBuffer("<<<INVALID BUFFER>>");
-
- return FakeBufferForRecovery;
+ FakeBufferForRecovery =
+ llvm::MemoryBuffer::getMemBuffer("<<<INVALID BUFFER>>");
+
+ return FakeBufferForRecovery.get();
}
/// \brief As part of recovering from missing or changed content, produce a
@@ -516,11 +507,11 @@ llvm::MemoryBuffer *SourceManager::getFakeBufferForRecovery() const {
const SrcMgr::ContentCache *
SourceManager::getFakeContentCacheForRecovery() const {
if (!FakeContentCacheForRecovery) {
- FakeContentCacheForRecovery = new ContentCache();
+ FakeContentCacheForRecovery = llvm::make_unique<SrcMgr::ContentCache>();
FakeContentCacheForRecovery->replaceBuffer(getFakeBufferForRecovery(),
/*DoNotFree=*/true);
}
- return FakeContentCacheForRecovery;
+ return FakeContentCacheForRecovery.get();
}
/// \brief Returns the previous in-order FileID or an invalid FileID if there
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index aecf13b00854..6987cd71f3d7 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -338,6 +338,8 @@ bool TargetInfo::isValidGCCRegisterName(StringRef Name) const {
// Get rid of any register prefix.
Name = removeGCCRegisterPrefix(Name);
+ if (Name.empty())
+ return false;
getGCCRegNames(Names, NumNames);
@@ -457,7 +459,9 @@ bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
// Eventually, an unknown constraint should just be treated as 'g'.
return false;
}
+ break;
case '&': // early clobber.
+ Info.setEarlyClobber();
break;
case '%': // commutative.
// FIXME: Check that there is a another register after this one.
@@ -482,9 +486,12 @@ bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
if (Name[1] == '=' || Name[1] == '+')
Name++;
break;
+ case '#': // Ignore as constraint.
+ while (Name[1] && Name[1] != ',')
+ Name++;
+ break;
case '?': // Disparage code slightly.
case '!': // Disparage severely.
- case '#': // Ignore as constraint.
case '*': // Ignore for choosing register preferences.
break; // Pass them.
}
@@ -492,6 +499,11 @@ bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
Name++;
}
+ // Early clobber with a read-write constraint which doesn't permit registers
+ // is invalid.
+ if (Info.earlyClobber() && Info.isReadWrite() && !Info.allowsRegister())
+ return false;
+
// If a constraint allows neither memory nor register operands it contains
// only modifiers. Reject it.
return Info.allowsMemory() || Info.allowsRegister();
@@ -534,11 +546,17 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
default:
// Check if we have a matching constraint
if (*Name >= '0' && *Name <= '9') {
- unsigned i = *Name - '0';
+ const char *DigitStart = Name;
+ while (Name[1] >= '0' && Name[1] <= '9')
+ Name++;
+ const char *DigitEnd = Name;
+ unsigned i;
+ if (StringRef(DigitStart, DigitEnd - DigitStart + 1)
+ .getAsInteger(10, i))
+ return false;
// Check if matching constraint is out of bounds.
- if (i >= NumOutputs)
- return false;
+ if (i >= NumOutputs) return false;
// A number must refer to an output only operand.
if (OutputConstraints[i].isReadWrite())
@@ -569,6 +587,10 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
if (Info.hasTiedOperand() && Info.getTiedOperand() != Index)
return false;
+ // A number must refer to an output only operand.
+ if (OutputConstraints[Index].isReadWrite())
+ return false;
+
Info.setTiedOperand(Index, OutputConstraints[Index]);
break;
}
@@ -586,6 +608,8 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
case 'N':
case 'O':
case 'P':
+ if (!validateAsmConstraint(Name, Info))
+ return false;
break;
case 'r': // general register.
Info.setAllowsRegister();
@@ -608,9 +632,12 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
break;
case ',': // multiple alternative constraint. Ignore comma.
break;
+ case '#': // Ignore as constraint.
+ while (Name[1] && Name[1] != ',')
+ Name++;
+ break;
case '?': // Disparage code slightly.
case '!': // Disparage severely.
- case '#': // Ignore as constraint.
case '*': // Ignore for choosing register preferences.
break; // Pass them.
}
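Matching constraints may now span several digits (e.g. "12" ties an input to output operand 12). A standalone sketch of the parse step, assuming the digit run has already been isolated as a StringRef; parseTiedOperand is an invented helper, not part of the patch:

    #include "llvm/ADT/StringRef.h"

    static bool parseTiedOperand(llvm::StringRef Digits, unsigned NumOutputs,
                                 unsigned &Index) {
      // StringRef::getAsInteger returns true on failure (non-digits, overflow).
      if (Digits.getAsInteger(10, Index))
        return false;
      return Index < NumOutputs; // must name an existing output operand
    }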
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 075f905cc359..4c64a02b5b8a 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -21,6 +21,7 @@
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
@@ -93,7 +94,8 @@ static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Builder.defineMacro("OBJC_NEW_PROPERTIES");
// AddressSanitizer doesn't play well with source fortification, which is on
// by default on Darwin.
- if (Opts.Sanitize.Address) Builder.defineMacro("_FORTIFY_SOURCE", "0");
+ if (Opts.Sanitize.has(SanitizerKind::Address))
+ Builder.defineMacro("_FORTIFY_SOURCE", "0");
if (!Opts.ObjCAutoRefCount) {
// __weak is always defined, for use in blocks and with objc pointers.
@@ -271,6 +273,12 @@ protected:
// On FreeBSD, wchar_t contains the number of the code point as
// used by the character set of the locale. These character sets are
// not necessarily a superset of ASCII.
+ //
+ // FIXME: This is wrong; the macro refers to the numerical values
+ // of wchar_t *literals*, which are not locale-dependent. However,
+ // FreeBSD systems apparently depend on us getting this wrong, and
+ // setting this to 1 is conforming even if all the basic source
+ // character literals have the same encoding as char and wchar_t.
Builder.defineMacro("__STDC_MB_MIGHT_NEQ_WC__", "1");
}
public:
@@ -315,7 +323,8 @@ protected:
Builder.defineMacro("_GNU_SOURCE");
}
public:
- KFreeBSDTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ KFreeBSDTargetInfo(const llvm::Triple &Triple)
+ : OSTargetInfo<Target>(Triple) {
this->UserLabelPrefix = "";
}
};
@@ -516,27 +525,6 @@ public:
}
};
-// AuroraUX target
-template<typename Target>
-class AuroraUXTargetInfo : public OSTargetInfo<Target> {
-protected:
- void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
- MacroBuilder &Builder) const override {
- DefineStd(Builder, "sun", Opts);
- DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
- Builder.defineMacro("__svr4__");
- Builder.defineMacro("__SVR4");
- }
-public:
- AuroraUXTargetInfo(const llvm::Triple &Triple)
- : OSTargetInfo<Target>(Triple) {
- this->UserLabelPrefix = "";
- this->WCharType = this->SignedLong;
- // FIXME: WIntType should be SignedLong
- }
-};
-
// Solaris target
template<typename Target>
class SolarisTargetInfo : public OSTargetInfo<Target> {
@@ -551,8 +539,8 @@ protected:
// Solaris headers require _XOPEN_SOURCE to be set to 600 for C99 and
// newer, but to 500 for everything else. feature_test.h has a check to
// ensure that you are not using C99 with an old version of X/Open or C89
- // with a new version.
- if (Opts.C99 || Opts.C11)
+ // with a new version.
+ if (Opts.C99)
Builder.defineMacro("_XOPEN_SOURCE", "600");
else
Builder.defineMacro("_XOPEN_SOURCE", "500");
@@ -658,7 +646,8 @@ public:
// RegParmMax is inherited from the underlying architecture
this->LongDoubleFormat = &llvm::APFloat::IEEEdouble;
if (Triple.getArch() == llvm::Triple::arm) {
- this->DescriptionString = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S128";
+ this->DescriptionString =
+ "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S128";
} else if (Triple.getArch() == llvm::Triple::x86) {
this->DescriptionString = "e-m:e-p:32:32-i64:64-n8:16:32-S128";
} else if (Triple.getArch() == llvm::Triple::x86_64) {
@@ -692,10 +681,14 @@ class PPCTargetInfo : public TargetInfo {
// Target cpu features.
bool HasVSX;
+ bool HasP8Vector;
+
+protected:
+ std::string ABI;
public:
PPCTargetInfo(const llvm::Triple &Triple)
- : TargetInfo(Triple), HasVSX(false) {
+ : TargetInfo(Triple), HasVSX(false), HasP8Vector(false) {
BigEndian = (Triple.getArch() != llvm::Triple::ppc64le);
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble;
@@ -781,6 +774,9 @@ public:
return CPUKnown;
}
+
+ StringRef getABI() const override { return ABI; }
+
void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const override {
Records = BuiltinInfo;
@@ -932,11 +928,10 @@ const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
#include "clang/Basic/BuiltinsPPC.def"
};
- /// handleTargetFeatures - Perform initialization based on the user
+/// handleTargetFeatures - Perform initialization based on the user
/// configured set of features.
bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
- // Remember the maximum enabled sselevel.
for (unsigned i = 0, e = Features.size(); i !=e; ++i) {
// Ignore disabled features.
if (Features[i][0] == '-')
@@ -949,6 +944,11 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
continue;
}
+ if (Feature == "power8-vector") {
+ HasP8Vector = true;
+ continue;
+ }
+
// TODO: Finish this list and add an assert that we've handled them
// all.
}
@@ -976,13 +976,18 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
// Target properties.
if (getTriple().getArch() == llvm::Triple::ppc64le) {
Builder.defineMacro("_LITTLE_ENDIAN");
- Builder.defineMacro("_CALL_ELF","2");
} else {
if (getTriple().getOS() != llvm::Triple::NetBSD &&
getTriple().getOS() != llvm::Triple::OpenBSD)
Builder.defineMacro("_BIG_ENDIAN");
}
+ // ABI options.
+ if (ABI == "elfv1")
+ Builder.defineMacro("_CALL_ELF", "1");
+ if (ABI == "elfv2")
+ Builder.defineMacro("_CALL_ELF", "2");
+
// Subtarget options.
Builder.defineMacro("__NATURAL_ALIGNMENT__");
Builder.defineMacro("__REGISTER_PREFIX__", "");
@@ -1094,6 +1099,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasVSX)
Builder.defineMacro("__VSX__");
+ if (HasP8Vector)
+ Builder.defineMacro("__POWER8_VECTOR__");
// FIXME: The following are not yet generated here by Clang, but are
// generated by GCC:
@@ -1132,13 +1139,19 @@ void PPCTargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
.Default(false);
Features["qpx"] = (CPU == "a2q");
+
+ if (!ABI.empty())
+ Features[ABI] = true;
}
bool PPCTargetInfo::hasFeature(StringRef Feature) const {
- return Feature == "powerpc";
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("powerpc", true)
+ .Case("vsx", HasVSX)
+ .Case("power8-vector", HasP8Vector)
+ .Default(false);
}
-
const char * const PPCTargetInfo::GCCRegNames[] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
@@ -1287,17 +1300,26 @@ public:
IntMaxType = SignedLong;
Int64Type = SignedLong;
- if (getTriple().getOS() == llvm::Triple::FreeBSD) {
+ if ((Triple.getArch() == llvm::Triple::ppc64le)) {
+ DescriptionString = "e-m:e-i64:64-n32:64";
+ ABI = "elfv2";
+ } else {
+ DescriptionString = "E-m:e-i64:64-n32:64";
+ ABI = "elfv1";
+ }
+
+ switch (getTriple().getOS()) {
+ case llvm::Triple::FreeBSD:
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble;
- DescriptionString = "E-m:e-i64:64-n32:64";
- } else {
- if ((Triple.getArch() == llvm::Triple::ppc64le)) {
- DescriptionString = "e-m:e-i64:64-n32:64";
- } else {
- DescriptionString = "E-m:e-i64:64-n32:64";
- }
-}
+ break;
+ case llvm::Triple::NetBSD:
+ IntMaxType = SignedLongLong;
+ Int64Type = SignedLongLong;
+ break;
+ default:
+ break;
+ }
// PPC64 supports atomics up to 8 bytes.
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
@@ -1305,6 +1327,14 @@ public:
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::CharPtrBuiltinVaList;
}
+ // PPC64 Linux-specific ABI options.
+ bool setABI(const std::string &Name) override {
+ if (Name == "elfv1" || Name == "elfv2") {
+ ABI = Name;
+ return true;
+ }
+ return false;
+ }
};
} // end anonymous namespace.
@@ -1317,7 +1347,7 @@ public:
: DarwinTargetInfo<PPC32TargetInfo>(Triple) {
HasAlignMac68kSupport = true;
BoolWidth = BoolAlign = 32; //XXX support -mone-byte-bool?
- PtrDiffType = SignedInt; // for http://llvm.org/bugs/show_bug.cgi?id=15726
+ PtrDiffType = SignedInt; // for http://llvm.org/bugs/show_bug.cgi?id=15726
LongLongAlign = 32;
SuitableAlign = 128;
DescriptionString = "E-m:o-p:32:32-f64:32:64-n32";
@@ -1344,6 +1374,8 @@ namespace {
1, // opencl_global
3, // opencl_local
4, // opencl_constant
+ // FIXME: generic has to be added to the target
+ 0, // opencl_generic
1, // cuda_device
4, // cuda_constant
3, // cuda_shared
@@ -1351,6 +1383,16 @@ namespace {
class NVPTXTargetInfo : public TargetInfo {
static const char * const GCCRegNames[];
static const Builtin::Info BuiltinInfo[];
+
+ // The GPU profiles supported by the NVPTX backend
+ enum GPUKind {
+ GK_NONE,
+ GK_SM20,
+ GK_SM21,
+ GK_SM30,
+ GK_SM35,
+ } GPU;
+
public:
NVPTXTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
BigEndian = false;
@@ -1361,11 +1403,34 @@ namespace {
// Define available target features
// These must be defined in sorted order!
NoAsmVariants = true;
+ // Set the default GPU to sm20
+ GPU = GK_SM20;
}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
Builder.defineMacro("__PTX__");
Builder.defineMacro("__NVPTX__");
+ if (Opts.CUDAIsDevice) {
+ // Set __CUDA_ARCH__ for the GPU specified.
+ std::string CUDAArchCode;
+ switch (GPU) {
+ case GK_SM20:
+ CUDAArchCode = "200";
+ break;
+ case GK_SM21:
+ CUDAArchCode = "210";
+ break;
+ case GK_SM30:
+ CUDAArchCode = "300";
+ break;
+ case GK_SM35:
+ CUDAArchCode = "350";
+ break;
+ default:
+ llvm_unreachable("Unhandled target CPU");
+ }
+ Builder.defineMacro("__CUDA_ARCH__", CUDAArchCode);
+ }
}
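For illustration, a minimal preprocessor sketch of how device-side code typically consumes the macro defined in this hunk; the guard value mirrors the GK_SM35 case above, and the HAVE_SM35_FEATURES name is an assumption, not part of the patch:

    // Sketch only: 200/210/300/350 correspond to GK_SM20..GK_SM35 above.
    #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
    #  define HAVE_SM35_FEATURES 1
    #else
    #  define HAVE_SM35_FEATURES 0
    #endif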
void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const override {
@@ -1384,8 +1449,9 @@ namespace {
Aliases = nullptr;
NumAliases = 0;
}
- bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &Info) const override {
+ bool
+ validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
switch (*Name) {
default: return false;
case 'c':
@@ -1407,14 +1473,14 @@ namespace {
return TargetInfo::CharPtrBuiltinVaList;
}
bool setCPU(const std::string &Name) override {
- bool Valid = llvm::StringSwitch<bool>(Name)
- .Case("sm_20", true)
- .Case("sm_21", true)
- .Case("sm_30", true)
- .Case("sm_35", true)
- .Default(false);
+ GPU = llvm::StringSwitch<GPUKind>(Name)
+ .Case("sm_20", GK_SM20)
+ .Case("sm_21", GK_SM21)
+ .Case("sm_30", GK_SM30)
+ .Case("sm_35", GK_SM35)
+ .Default(GK_NONE);
- return Valid;
+ return GPU != GK_NONE;
}
};
@@ -1462,11 +1528,15 @@ static const unsigned R600AddrSpaceMap[] = {
1, // opencl_global
3, // opencl_local
2, // opencl_constant
+ 4, // opencl_generic
1, // cuda_device
2, // cuda_constant
3 // cuda_shared
};
+// If you edit the description strings, make sure you update
+// getPointerWidthV().
+
static const char *DescriptionStringR600 =
"e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
@@ -1506,6 +1576,20 @@ public:
UseAddrSpaceMapMangling = true;
}
+ uint64_t getPointerWidthV(unsigned AddrSpace) const override {
+ if (GPU <= GK_CAYMAN)
+ return 32;
+
+ switch(AddrSpace) {
+ default:
+ return 64;
+ case 0:
+ case 3:
+ case 5:
+ return 32;
+ }
+ }
+
const char * getClobbers() const override {
return "";
}
@@ -1573,6 +1657,7 @@ public:
.Case("pitcairn", GK_SOUTHERN_ISLANDS)
.Case("verde", GK_SOUTHERN_ISLANDS)
.Case("oland", GK_SOUTHERN_ISLANDS)
+ .Case("hainan", GK_SOUTHERN_ISLANDS)
.Case("bonaire", GK_SEA_ISLANDS)
.Case("kabini", GK_SEA_ISLANDS)
.Case("kaveri", GK_SEA_ISLANDS)
@@ -1669,16 +1754,19 @@ class X86TargetInfo : public TargetInfo {
bool HasPCLMUL;
bool HasLZCNT;
bool HasRDRND;
+ bool HasFSGSBASE;
bool HasBMI;
bool HasBMI2;
bool HasPOPCNT;
bool HasRTM;
bool HasPRFCHW;
bool HasRDSEED;
+ bool HasADX;
bool HasTBM;
bool HasFMA;
bool HasF16C;
- bool HasAVX512CD, HasAVX512ER, HasAVX512PF;
+ bool HasAVX512CD, HasAVX512ER, HasAVX512PF, HasAVX512DQ, HasAVX512BW,
+ HasAVX512VL;
bool HasSHA;
bool HasCX16;
@@ -1753,18 +1841,37 @@ class X86TargetInfo : public TargetInfo {
/// \name Atom
/// Atom processors
//@{
- CK_Atom,
+ CK_Bonnell,
CK_Silvermont,
//@}
/// \name Nehalem
/// Nehalem microarchitecture based processors.
- //@{
- CK_Corei7,
- CK_Corei7AVX,
- CK_CoreAVXi,
- CK_CoreAVX2,
- //@}
+ CK_Nehalem,
+
+ /// \name Westmere
+ /// Westmere microarchitecture based processors.
+ CK_Westmere,
+
+ /// \name Sandy Bridge
+ /// Sandy Bridge microarchitecture based processors.
+ CK_SandyBridge,
+
+ /// \name Ivy Bridge
+ /// Ivy Bridge microarchitecture based processors.
+ CK_IvyBridge,
+
+ /// \name Haswell
+ /// Haswell microarchitecture based processors.
+ CK_Haswell,
+
+ /// \name Broadwell
+ /// Broadwell microarchitecture based processors.
+ CK_Broadwell,
+
+ /// \name Skylake
+ /// Skylake microarchitecture based processors.
+ CK_Skylake,
/// \name Knights Landing
/// Knights Landing processor.
@@ -1820,6 +1927,7 @@ class X86TargetInfo : public TargetInfo {
/// This specification is deprecated and will be removed in the future.
/// Users should prefer \see CK_K8.
// FIXME: Warn on this when the CPU is set to it.
+ //@{
CK_x86_64,
//@}
@@ -1840,11 +1948,12 @@ public:
X86TargetInfo(const llvm::Triple &Triple)
: TargetInfo(Triple), SSELevel(NoSSE), MMX3DNowLevel(NoMMX3DNow),
XOPLevel(NoXOP), HasAES(false), HasPCLMUL(false), HasLZCNT(false),
- HasRDRND(false), HasBMI(false), HasBMI2(false), HasPOPCNT(false),
- HasRTM(false), HasPRFCHW(false), HasRDSEED(false), HasTBM(false),
- HasFMA(false), HasF16C(false), HasAVX512CD(false), HasAVX512ER(false),
- HasAVX512PF(false), HasSHA(false), HasCX16(false), CPU(CK_Generic),
- FPMath(FP_Default) {
+ HasRDRND(false), HasFSGSBASE(false), HasBMI(false), HasBMI2(false),
+ HasPOPCNT(false), HasRTM(false), HasPRFCHW(false), HasRDSEED(false),
+ HasADX(false), HasTBM(false), HasFMA(false), HasF16C(false),
+ HasAVX512CD(false), HasAVX512ER(false), HasAVX512PF(false),
+ HasAVX512DQ(false), HasAVX512BW(false), HasAVX512VL(false),
+ HasSHA(false), HasCX16(false), CPU(CK_Generic), FPMath(FP_Default) {
BigEndian = false;
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
}
@@ -1873,7 +1982,14 @@ public:
NumNames = llvm::array_lengthof(AddlRegNames);
}
bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &info) const override;
+ TargetInfo::ConstraintInfo &info) const override;
+
+ bool validateOutputSize(StringRef Constraint, unsigned Size) const override;
+
+ bool validateInputSize(StringRef Constraint, unsigned Size) const override;
+
+ virtual bool validateOperandSize(StringRef Constraint, unsigned Size) const;
+
std::string convertConstraint(const char *&Constraint) const override;
const char *getClobbers() const override {
return "~{dirflag},~{fpsr},~{flags}";
@@ -1930,12 +2046,22 @@ public:
.Case("nocona", CK_Nocona)
.Case("core2", CK_Core2)
.Case("penryn", CK_Penryn)
- .Case("atom", CK_Atom)
- .Case("slm", CK_Silvermont)
- .Case("corei7", CK_Corei7)
- .Case("corei7-avx", CK_Corei7AVX)
- .Case("core-avx-i", CK_CoreAVXi)
- .Case("core-avx2", CK_CoreAVX2)
+ .Case("bonnell", CK_Bonnell)
+ .Case("atom", CK_Bonnell) // Legacy name.
+ .Case("silvermont", CK_Silvermont)
+ .Case("slm", CK_Silvermont) // Legacy name.
+ .Case("nehalem", CK_Nehalem)
+ .Case("corei7", CK_Nehalem) // Legacy name.
+ .Case("westmere", CK_Westmere)
+ .Case("sandybridge", CK_SandyBridge)
+ .Case("corei7-avx", CK_SandyBridge) // Legacy name.
+ .Case("ivybridge", CK_IvyBridge)
+ .Case("core-avx-i", CK_IvyBridge) // Legacy name.
+ .Case("haswell", CK_Haswell)
+ .Case("core-avx2", CK_Haswell) // Legacy name.
+ .Case("broadwell", CK_Broadwell)
+ .Case("skylake", CK_Skylake)
+ .Case("skx", CK_Skylake) // Legacy name.
.Case("knl", CK_KNL)
.Case("k6", CK_K6)
.Case("k6-2", CK_K6_2)
@@ -1952,6 +2078,7 @@ public:
.Case("k8-sse3", CK_K8SSE3)
.Case("opteron", CK_Opteron)
.Case("opteron-sse3", CK_OpteronSSE3)
+ .Case("barcelona", CK_AMDFAM10)
.Case("amdfam10", CK_AMDFAM10)
.Case("btver1", CK_BTVER1)
.Case("btver2", CK_BTVER2)
@@ -2008,12 +2135,15 @@ public:
case CK_Nocona:
case CK_Core2:
case CK_Penryn:
- case CK_Atom:
+ case CK_Bonnell:
case CK_Silvermont:
- case CK_Corei7:
- case CK_Corei7AVX:
- case CK_CoreAVXi:
- case CK_CoreAVX2:
+ case CK_Nehalem:
+ case CK_Westmere:
+ case CK_SandyBridge:
+ case CK_IvyBridge:
+ case CK_Haswell:
+ case CK_Broadwell:
+ case CK_Skylake:
case CK_KNL:
case CK_Athlon64:
case CK_Athlon64SSE3:
@@ -2041,8 +2171,9 @@ public:
// We accept all non-ARM calling conventions
return (CC == CC_X86ThisCall ||
CC == CC_X86FastCall ||
- CC == CC_X86StdCall ||
- CC == CC_C ||
+ CC == CC_X86StdCall ||
+ CC == CC_X86VectorCall ||
+ CC == CC_C ||
CC == CC_X86Pascal ||
CC == CC_IntelOclBicc) ? CCCR_OK : CCCR_Warning;
}
@@ -2082,10 +2213,13 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
break;
case CK_PentiumMMX:
case CK_Pentium2:
+ case CK_K6:
+ case CK_WinChipC6:
setFeatureEnabledImpl(Features, "mmx", true);
break;
case CK_Pentium3:
case CK_Pentium3M:
+ case CK_C3_2:
setFeatureEnabledImpl(Features, "sse", true);
break;
case CK_PentiumM:
@@ -2101,6 +2235,7 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabledImpl(Features, "cx16", true);
break;
case CK_Core2:
+ case CK_Bonnell:
setFeatureEnabledImpl(Features, "ssse3", true);
setFeatureEnabledImpl(Features, "cx16", true);
break;
@@ -2108,44 +2243,40 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabledImpl(Features, "sse4.1", true);
setFeatureEnabledImpl(Features, "cx16", true);
break;
- case CK_Atom:
- setFeatureEnabledImpl(Features, "ssse3", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- break;
- case CK_Silvermont:
- setFeatureEnabledImpl(Features, "sse4.2", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- break;
- case CK_Corei7:
- setFeatureEnabledImpl(Features, "sse4.2", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- break;
- case CK_Corei7AVX:
- setFeatureEnabledImpl(Features, "avx", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- break;
- case CK_CoreAVXi:
- setFeatureEnabledImpl(Features, "avx", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- break;
- case CK_CoreAVX2:
+ case CK_Skylake:
+ setFeatureEnabledImpl(Features, "avx512f", true);
+ setFeatureEnabledImpl(Features, "avx512cd", true);
+ setFeatureEnabledImpl(Features, "avx512dq", true);
+ setFeatureEnabledImpl(Features, "avx512bw", true);
+ setFeatureEnabledImpl(Features, "avx512vl", true);
+ // FALLTHROUGH
+ case CK_Broadwell:
+ setFeatureEnabledImpl(Features, "rdseed", true);
+ setFeatureEnabledImpl(Features, "adx", true);
+ // FALLTHROUGH
+ case CK_Haswell:
setFeatureEnabledImpl(Features, "avx2", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "f16c", true);
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "bmi2", true);
setFeatureEnabledImpl(Features, "rtm", true);
setFeatureEnabledImpl(Features, "fma", true);
+ // FALLTHROUGH
+ case CK_IvyBridge:
+ setFeatureEnabledImpl(Features, "rdrnd", true);
+ setFeatureEnabledImpl(Features, "f16c", true);
+ setFeatureEnabledImpl(Features, "fsgsbase", true);
+ // FALLTHROUGH
+ case CK_SandyBridge:
+ setFeatureEnabledImpl(Features, "avx", true);
+ // FALLTHROUGH
+ case CK_Westmere:
+ case CK_Silvermont:
+ setFeatureEnabledImpl(Features, "aes", true);
+ setFeatureEnabledImpl(Features, "pclmul", true);
+ // FALLTHROUGH
+ case CK_Nehalem:
+ setFeatureEnabledImpl(Features, "sse4.2", true);
setFeatureEnabledImpl(Features, "cx16", true);
break;
case CK_KNL:
@@ -2153,19 +2284,19 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabledImpl(Features, "avx512cd", true);
setFeatureEnabledImpl(Features, "avx512er", true);
setFeatureEnabledImpl(Features, "avx512pf", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
+ setFeatureEnabledImpl(Features, "rdseed", true);
+ setFeatureEnabledImpl(Features, "adx", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "f16c", true);
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "bmi2", true);
setFeatureEnabledImpl(Features, "rtm", true);
setFeatureEnabledImpl(Features, "fma", true);
- break;
- case CK_K6:
- case CK_WinChipC6:
- setFeatureEnabledImpl(Features, "mmx", true);
+ setFeatureEnabledImpl(Features, "rdrnd", true);
+ setFeatureEnabledImpl(Features, "f16c", true);
+ setFeatureEnabledImpl(Features, "fsgsbase", true);
+ setFeatureEnabledImpl(Features, "aes", true);
+ setFeatureEnabledImpl(Features, "pclmul", true);
+ setFeatureEnabledImpl(Features, "cx16", true);
break;
case CK_K6_2:
case CK_K6_3:
@@ -2191,43 +2322,29 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabledImpl(Features, "sse2", true);
setFeatureEnabledImpl(Features, "3dnowa", true);
break;
+ case CK_AMDFAM10:
+ setFeatureEnabledImpl(Features, "sse4a", true);
+ setFeatureEnabledImpl(Features, "lzcnt", true);
+ setFeatureEnabledImpl(Features, "popcnt", true);
+ // FALLTHROUGH
case CK_K8SSE3:
case CK_OpteronSSE3:
case CK_Athlon64SSE3:
setFeatureEnabledImpl(Features, "sse3", true);
setFeatureEnabledImpl(Features, "3dnowa", true);
break;
- case CK_AMDFAM10:
- setFeatureEnabledImpl(Features, "sse3", true);
- setFeatureEnabledImpl(Features, "sse4a", true);
- setFeatureEnabledImpl(Features, "3dnowa", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "popcnt", true);
- break;
- case CK_BTVER1:
- setFeatureEnabledImpl(Features, "ssse3", true);
- setFeatureEnabledImpl(Features, "sse4a", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "popcnt", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- break;
case CK_BTVER2:
setFeatureEnabledImpl(Features, "avx", true);
- setFeatureEnabledImpl(Features, "sse4a", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- break;
- case CK_BDVER1:
- setFeatureEnabledImpl(Features, "xop", true);
+ // FALLTHROUGH
+ case CK_BTVER1:
+ setFeatureEnabledImpl(Features, "ssse3", true);
+ setFeatureEnabledImpl(Features, "sse4a", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
+ setFeatureEnabledImpl(Features, "popcnt", true);
setFeatureEnabledImpl(Features, "prfchw", true);
setFeatureEnabledImpl(Features, "cx16", true);
break;
@@ -2235,22 +2352,24 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabledImpl(Features, "avx2", true);
setFeatureEnabledImpl(Features, "bmi2", true);
// FALLTHROUGH
- case CK_BDVER2:
case CK_BDVER3:
+ setFeatureEnabledImpl(Features, "fsgsbase", true);
+ // FALLTHROUGH
+ case CK_BDVER2:
+ setFeatureEnabledImpl(Features, "bmi", true);
+ setFeatureEnabledImpl(Features, "fma", true);
+ setFeatureEnabledImpl(Features, "f16c", true);
+ setFeatureEnabledImpl(Features, "tbm", true);
+ // FALLTHROUGH
+ case CK_BDVER1:
+ // xop implies avx, sse4a and fma4.
setFeatureEnabledImpl(Features, "xop", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
setFeatureEnabledImpl(Features, "prfchw", true);
- setFeatureEnabledImpl(Features, "bmi", true);
- setFeatureEnabledImpl(Features, "fma", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "tbm", true);
setFeatureEnabledImpl(Features, "cx16", true);
break;
- case CK_C3_2:
- setFeatureEnabledImpl(Features, "sse", true);
- break;
}
}
@@ -2305,7 +2424,8 @@ void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
Features["avx2"] = false;
case AVX512F:
Features["avx512f"] = Features["avx512cd"] = Features["avx512er"] =
- Features["avx512pf"] = false;
+ Features["avx512pf"] = Features["avx512dq"] = Features["avx512bw"] =
+ Features["avx512vl"] = false;
}
}
@@ -2404,7 +2524,8 @@ void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
setSSELevel(Features, AVX2, Enabled);
} else if (Name == "avx512f") {
setSSELevel(Features, AVX512F, Enabled);
- } else if (Name == "avx512cd" || Name == "avx512er" || Name == "avx512pf") {
+ } else if (Name == "avx512cd" || Name == "avx512er" || Name == "avx512pf"
+ || Name == "avx512dq" || Name == "avx512bw" || Name == "avx512vl") {
if (Enabled)
setSSELevel(Features, AVX512F, Enabled);
} else if (Name == "fma") {
@@ -2457,6 +2578,11 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
continue;
}
+ if (Feature == "fsgsbase") {
+ HasFSGSBASE = true;
+ continue;
+ }
+
if (Feature == "bmi") {
HasBMI = true;
continue;
@@ -2487,6 +2613,11 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
continue;
}
+ if (Feature == "adx") {
+ HasADX = true;
+ continue;
+ }
+
if (Feature == "tbm") {
HasTBM = true;
continue;
@@ -2517,6 +2648,21 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
continue;
}
+ if (Feature == "avx512dq") {
+ HasAVX512DQ = true;
+ continue;
+ }
+
+ if (Feature == "avx512bw") {
+ HasAVX512BW = true;
+ continue;
+ }
+
+ if (Feature == "avx512vl") {
+ HasAVX512VL = true;
+ continue;
+ }
+
if (Feature == "sha") {
HasSHA = true;
continue;
@@ -2606,6 +2752,10 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__amd64");
Builder.defineMacro("__x86_64");
Builder.defineMacro("__x86_64__");
+ if (getTriple().getArchName() == "x86_64h") {
+ Builder.defineMacro("__x86_64h");
+ Builder.defineMacro("__x86_64h__");
+ }
} else {
DefineStd(Builder, "i386", Opts);
}
@@ -2668,18 +2818,30 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Penryn:
defineCPUMacros(Builder, "core2");
break;
- case CK_Atom:
+ case CK_Bonnell:
defineCPUMacros(Builder, "atom");
break;
case CK_Silvermont:
defineCPUMacros(Builder, "slm");
break;
- case CK_Corei7:
- case CK_Corei7AVX:
- case CK_CoreAVXi:
- case CK_CoreAVX2:
+ case CK_Nehalem:
+ case CK_Westmere:
+ case CK_SandyBridge:
+ case CK_IvyBridge:
+ case CK_Haswell:
+ case CK_Broadwell:
+ // FIXME: Historically, we defined this legacy name; it would be nice to
+ // remove it at some point. We've never exposed fine-grained names for
+ // recent primary x86 CPUs, and we should keep it that way.
defineCPUMacros(Builder, "corei7");
break;
+ case CK_Skylake:
+ // FIXME: Historically, we defined this legacy name; it would be nice to
+ // remove it at some point. This is the only fine-grained CPU macro in the
+ // main Intel CPU line, and it would be better not to have these and force
+ // people to use ISA macros.
+ defineCPUMacros(Builder, "skx");
+ break;
case CK_KNL:
defineCPUMacros(Builder, "knl");
break;
@@ -2766,6 +2928,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasRDRND)
Builder.defineMacro("__RDRND__");
+ if (HasFSGSBASE)
+ Builder.defineMacro("__FSGSBASE__");
+
if (HasBMI)
Builder.defineMacro("__BMI__");
@@ -2784,6 +2949,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasRDSEED)
Builder.defineMacro("__RDSEED__");
+ if (HasADX)
+ Builder.defineMacro("__ADX__");
+
if (HasTBM)
Builder.defineMacro("__TBM__");
@@ -2810,6 +2978,12 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVX512ER__");
if (HasAVX512PF)
Builder.defineMacro("__AVX512PF__");
+ if (HasAVX512DQ)
+ Builder.defineMacro("__AVX512DQ__");
+ if (HasAVX512BW)
+ Builder.defineMacro("__AVX512BW__");
+ if (HasAVX512VL)
+ Builder.defineMacro("__AVX512VL__");
if (HasSHA)
Builder.defineMacro("__SHA__");
@@ -2893,23 +3067,26 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("avx512cd", HasAVX512CD)
.Case("avx512er", HasAVX512ER)
.Case("avx512pf", HasAVX512PF)
+ .Case("avx512dq", HasAVX512DQ)
+ .Case("avx512bw", HasAVX512BW)
+ .Case("avx512vl", HasAVX512VL)
.Case("bmi", HasBMI)
.Case("bmi2", HasBMI2)
.Case("cx16", HasCX16)
.Case("f16c", HasF16C)
.Case("fma", HasFMA)
.Case("fma4", XOPLevel >= FMA4)
- .Case("tbm", HasTBM)
+ .Case("fsgsbase", HasFSGSBASE)
.Case("lzcnt", HasLZCNT)
- .Case("rdrnd", HasRDRND)
.Case("mm3dnow", MMX3DNowLevel >= AMD3DNow)
.Case("mm3dnowa", MMX3DNowLevel >= AMD3DNowAthlon)
.Case("mmx", MMX3DNowLevel >= MMX)
.Case("pclmul", HasPCLMUL)
.Case("popcnt", HasPOPCNT)
- .Case("rtm", HasRTM)
.Case("prfchw", HasPRFCHW)
+ .Case("rdrnd", HasRDRND)
.Case("rdseed", HasRDSEED)
+ .Case("rtm", HasRTM)
.Case("sha", HasSHA)
.Case("sse", SSELevel >= SSE1)
.Case("sse2", SSELevel >= SSE2)
@@ -2918,6 +3095,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("sse4.1", SSELevel >= SSE41)
.Case("sse4.2", SSELevel >= SSE42)
.Case("sse4a", XOPLevel >= SSE4A)
+ .Case("tbm", HasTBM)
.Case("x86", true)
.Case("x86_32", getTriple().getArch() == llvm::Triple::x86)
.Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64)
@@ -2930,6 +3108,28 @@ X86TargetInfo::validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const {
switch (*Name) {
default: return false;
+ case 'I':
+ Info.setRequiresImmediate(0, 31);
+ return true;
+ case 'J':
+ Info.setRequiresImmediate(0, 63);
+ return true;
+ case 'K':
+ Info.setRequiresImmediate(-128, 127);
+ return true;
+ case 'L':
+ // FIXME: properly analyze this constraint:
+ // must be one of 0xff, 0xffff, or 0xffffffff
+ return true;
+ case 'M':
+ Info.setRequiresImmediate(0, 3);
+ return true;
+ case 'N':
+ Info.setRequiresImmediate(0, 255);
+ return true;
+ case 'O':
+ Info.setRequiresImmediate(0, 127);
+ return true;
case 'Y': // first letter of a pair:
switch (*(Name+1)) {
default: return false;
@@ -2974,6 +3174,39 @@ X86TargetInfo::validateAsmConstraint(const char *&Name,
}
}
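As a hedged example of the immediate ranges enforced above (the function below is assumed user code, not part of the patch), the 'I' constraint now only admits constants in 0..31, matching its traditional use for shift counts:

    unsigned shift_by_three(unsigned v) {
      // 'I' requires an immediate in 0..31, cf. setRequiresImmediate(0, 31).
      __asm__("shll %1, %0" : "+r"(v) : "I"(3));
      return v;
    }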
+bool X86TargetInfo::validateOutputSize(StringRef Constraint,
+ unsigned Size) const {
+ // Strip off constraint modifiers.
+ while (Constraint[0] == '=' ||
+ Constraint[0] == '+' ||
+ Constraint[0] == '&')
+ Constraint = Constraint.substr(1);
+
+ return validateOperandSize(Constraint, Size);
+}
+
+bool X86TargetInfo::validateInputSize(StringRef Constraint,
+ unsigned Size) const {
+ return validateOperandSize(Constraint, Size);
+}
+
+bool X86TargetInfo::validateOperandSize(StringRef Constraint,
+ unsigned Size) const {
+ switch (Constraint[0]) {
+ default: break;
+ case 'y':
+ return Size <= 64;
+ case 'f':
+ case 't':
+ case 'u':
+ return Size <= 128;
+ case 'x':
+ // 256-bit ymm registers can be used if target supports AVX.
+ return Size <= (SSELevel >= AVX ? 256U : 128U);
+ }
+
+ return true;
+}
std::string
X86TargetInfo::convertConstraint(const char *&Constraint) const {
@@ -3030,18 +3263,25 @@ public:
if (RegNo == 1) return 2;
return -1;
}
- bool validateInputSize(StringRef Constraint,
- unsigned Size) const override {
+ bool validateOperandSize(StringRef Constraint,
+ unsigned Size) const override {
switch (Constraint[0]) {
default: break;
+ case 'R':
+ case 'q':
+ case 'Q':
case 'a':
case 'b':
case 'c':
case 'd':
+ case 'S':
+ case 'D':
return Size <= 32;
+ case 'A':
+ return Size <= 64;
}
- return true;
+ return X86TargetInfo::validateOperandSize(Constraint, Size);
}
};
} // end anonymous namespace
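A hedged sketch of what the size checks above and in the 32-bit override catch (the function and variable names are assumptions for illustration): on i386 the 'a'..'d' register constraints now reject operands wider than 32 bits, and the 'x' constraint only accepts 256-bit values when AVX is enabled:

    typedef float v8sf __attribute__((vector_size(32)));   // 256-bit value
    void operand_size_demo(long long wide) {
      v8sf v = {0, 0, 0, 0, 0, 0, 0, 0};
      // Rejected by the i386 override above: 'a' (eax) is only 32 bits wide.
      // __asm__("" : : "a"(wide));
      // Accepted only when AVX is enabled (Size <= 256); rejected under SSE.
      __asm__("" : : "x"(v));
      (void)wide;
    }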
@@ -3304,9 +3544,10 @@ public:
Int64Type = IsX32 ? SignedLongLong : SignedLong;
RegParmMax = 6;
+ // Pointers are 32-bit in x32.
DescriptionString = (IsX32)
- ? "e-m:e-" "p:32:32-" "i64:64-f80:128-n8:16:32:64-S128"
- : "e-m:e-" "i64:64-f80:128-n8:16:32:64-S128";
+ ? "e-m:e-p:32:32-i64:64-f80:128-n8:16:32:64-S128"
+ : "e-m:e-i64:64-f80:128-n8:16:32:64-S128";
// Use fpret only for long double.
RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble);
@@ -3330,6 +3571,7 @@ public:
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
return (CC == CC_C ||
+ CC == CC_X86VectorCall ||
CC == CC_IntelOclBicc ||
CC == CC_X86_64Win64) ? CCCR_OK : CCCR_Warning;
}
@@ -3338,6 +3580,8 @@ public:
return CC_C;
}
+ // for x32 we need it here explicitly
+ bool hasInt128Type() const override { return true; }
};
} // end anonymous namespace
@@ -3367,6 +3611,7 @@ public:
}
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
return (CC == CC_C ||
+ CC == CC_X86VectorCall ||
CC == CC_IntelOclBicc ||
CC == CC_X86_64SysV) ? CCCR_OK : CCCR_Warning;
}
@@ -3404,6 +3649,10 @@ public:
DefineStd(Builder, "WIN64", Opts);
Builder.defineMacro("__MINGW64__");
addMinGWDefines(Opts, Builder);
+
+ // GCC defines this macro when it is using __gxx_personality_seh0.
+ if (!Opts.SjLjExceptions)
+ Builder.defineMacro("__SEH__");
}
};
} // end anonymous namespace
@@ -3417,7 +3666,7 @@ public:
MaxVectorAlign = 256;
// The 64-bit iOS simulator uses the builtin bool type for Objective-C.
llvm::Triple T = llvm::Triple(Triple);
- if (T.getOS() == llvm::Triple::IOS)
+ if (T.isiOS())
UseSignedCharForObjCBool = false;
DescriptionString = "e-m:o-i64:64-f80:128-n8:16:32:64-S128";
}
@@ -3492,6 +3741,14 @@ class ARMTargetInfo : public TargetInfo {
unsigned CRC : 1;
unsigned Crypto : 1;
+ // ACLE 6.5.1 Hardware floating point
+ enum {
+ HW_FP_HP = (1 << 1), /// half (16-bit)
+ HW_FP_SP = (1 << 2), /// single (32-bit)
+ HW_FP_DP = (1 << 3), /// double (64-bit)
+ };
+ uint32_t HW_FP;
+
static const Builtin::Info BuiltinInfo[];
static bool shouldUseInlineAtomic(const llvm::Triple &T) {
@@ -3531,8 +3788,8 @@ class ARMTargetInfo : public TargetInfo {
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
const llvm::Triple &T = getTriple();
- // size_t is unsigned long on Darwin and NetBSD.
- if (T.isOSDarwin() || T.getOS() == llvm::Triple::NetBSD)
+ // size_t is unsigned long on MachO-derived environments and NetBSD.
+ if (T.isOSBinFormatMachO() || T.getOS() == llvm::Triple::NetBSD)
SizeType = UnsignedLong;
else
SizeType = UnsignedInt;
@@ -3555,42 +3812,27 @@ class ARMTargetInfo : public TargetInfo {
ZeroLengthBitfieldBoundary = 0;
- if (IsThumb) {
- // Thumb1 add sp, #imm requires the immediate value be multiple of 4,
- // so set preferred for small types to 32.
- if (T.isOSBinFormatMachO()) {
- DescriptionString = BigEndian ?
- "E-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-"
- "v128:64:128-a:0:32-n32-S64" :
- "e-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-"
- "v128:64:128-a:0:32-n32-S64";
- } else if (T.isOSWindows()) {
- // FIXME: this is invalid for WindowsCE
- assert(!BigEndian && "Windows on ARM does not support big endian");
- DescriptionString = "e"
- "-m:e"
- "-p:32:32"
- "-i1:8:32-i8:8:32-i16:16:32-i64:64"
- "-v128:64:128"
- "-a:0:32"
- "-n32"
- "-S64";
- } else {
- DescriptionString = BigEndian ?
- "E-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-"
- "v128:64:128-a:0:32-n32-S64" :
- "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-"
- "v128:64:128-a:0:32-n32-S64";
- }
+ // Thumb1 add sp, #imm requires the immediate value be multiple of 4,
+ // so set preferred for small types to 32.
+ if (T.isOSBinFormatMachO()) {
+ DescriptionString =
+ BigEndian ? "E-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ : "e-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64";
+ } else if (T.isOSWindows()) {
+ // FIXME: this is invalid for WindowsCE
+ assert(!BigEndian && "Windows on ARM does not support big endian");
+ DescriptionString = "e"
+ "-m:e"
+ "-p:32:32"
+ "-i64:64"
+ "-v128:64:128"
+ "-a:0:32"
+ "-n32"
+ "-S64";
} else {
- if (T.isOSBinFormatMachO())
- DescriptionString = BigEndian ?
- "E-m:o-p:32:32-i64:64-v128:64:128-n32-S64" :
- "e-m:o-p:32:32-i64:64-v128:64:128-n32-S64";
- else
- DescriptionString = BigEndian ?
- "E-m:e-p:32:32-i64:64-v128:64:128-n32-S64" :
- "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64";
+ DescriptionString =
+ BigEndian ? "E-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ : "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64";
}
// FIXME: Enumerated types are variable width in straight AAPCS.
@@ -3621,31 +3863,16 @@ class ARMTargetInfo : public TargetInfo {
/// gcc.
ZeroLengthBitfieldBoundary = 32;
- if (IsThumb) {
- // Thumb1 add sp, #imm requires the immediate value be multiple of 4,
- // so set preferred for small types to 32.
- if (T.isOSBinFormatMachO())
- DescriptionString = BigEndian ?
- "E-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64"
- "-v64:32:64-v128:32:128-a:0:32-n32-S32" :
- "e-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64"
- "-v64:32:64-v128:32:128-a:0:32-n32-S32";
- else
- DescriptionString = BigEndian ?
- "E-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64"
- "-v64:32:64-v128:32:128-a:0:32-n32-S32" :
- "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64"
- "-v64:32:64-v128:32:128-a:0:32-n32-S32";
- } else {
- if (T.isOSBinFormatMachO())
- DescriptionString = BigEndian ?
- "E-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32" :
- "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32";
- else
- DescriptionString = BigEndian ?
- "E-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32" :
- "e-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32";
- }
+ if (T.isOSBinFormatMachO())
+ DescriptionString =
+ BigEndian
+ ? "E-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+ : "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32";
+ else
+ DescriptionString =
+ BigEndian
+ ? "E-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+ : "e-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32";
// FIXME: Override "preferred align" for double and long long.
}
@@ -3653,7 +3880,7 @@ class ARMTargetInfo : public TargetInfo {
public:
ARMTargetInfo(const llvm::Triple &Triple, bool IsBigEndian)
: TargetInfo(Triple), CPU("arm1136j-s"), FPMath(FP_Default),
- IsAAPCS(true) {
+ IsAAPCS(true), HW_FP(0) {
BigEndian = IsBigEndian;
switch (getTriple().getOS()) {
@@ -3672,7 +3899,45 @@ public:
// FIXME: Should we just treat this as a feature?
IsThumb = getTriple().getArchName().startswith("thumb");
- setABI("aapcs-linux");
+ // FIXME: This duplicates code from the driver that sets the -target-abi
+ // option - this code is used if -target-abi isn't passed and should
+ // be unified in some way.
+ if (Triple.isOSBinFormatMachO()) {
+ // The backend is hardwired to assume AAPCS for M-class processors; ensure
+ // the frontend matches that.
+ if (Triple.getEnvironment() == llvm::Triple::EABI ||
+ Triple.getOS() == llvm::Triple::UnknownOS ||
+ StringRef(CPU).startswith("cortex-m")) {
+ setABI("aapcs");
+ } else {
+ setABI("apcs-gnu");
+ }
+ } else if (Triple.isOSWindows()) {
+ // FIXME: this is invalid for WindowsCE
+ setABI("aapcs");
+ } else {
+ // Select the default based on the platform.
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::Android:
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIHF:
+ setABI("aapcs-linux");
+ break;
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::EABI:
+ setABI("aapcs");
+ break;
+ case llvm::Triple::GNU:
+ setABI("apcs-gnu");
+ break;
+ default:
+ if (Triple.getOS() == llvm::Triple::NetBSD)
+ setABI("apcs-gnu");
+ else
+ setABI("aapcs");
+ break;
+ }
+ }
// ARM targets default to using the ARM C++ ABI.
TheCXXABI.set(TargetCXXABI::GenericARM);
@@ -3683,8 +3948,8 @@ public:
MaxAtomicInlineWidth = 64;
// Do force alignment of members that follow zero length bitfields. If
- // the alignment of the zero-length bitfield is greater than the member
- // that follows it, `bar', `bar' will be aligned as the type of the
+ // the alignment of the zero-length bitfield is greater than the member
+ // that follows it, `bar', `bar' will be aligned as the type of the
// zero length bitfield.
UseZeroLengthBitfieldAlignment = true;
}
@@ -3708,16 +3973,10 @@ public:
}
void getDefaultFeatures(llvm::StringMap<bool> &Features) const override {
- if (IsAAPCS)
- Features["aapcs"] = true;
- else
- Features["apcs"] = true;
-
StringRef ArchName = getTriple().getArchName();
if (CPU == "arm1136jf-s" || CPU == "arm1176jzf-s" || CPU == "mpcore")
Features["vfp2"] = true;
- else if (CPU == "cortex-a8" || CPU == "cortex-a9" ||
- CPU == "cortex-a9-mp") {
+ else if (CPU == "cortex-a8" || CPU == "cortex-a9") {
Features["vfp3"] = true;
Features["neon"] = true;
}
@@ -3726,7 +3985,7 @@ public:
Features["neon"] = true;
} else if (CPU == "swift" || CPU == "cortex-a7" ||
CPU == "cortex-a12" || CPU == "cortex-a15" ||
- CPU == "krait") {
+ CPU == "cortex-a17" || CPU == "krait") {
Features["vfp4"] = true;
Features["neon"] = true;
Features["hwdiv"] = true;
@@ -3752,7 +4011,7 @@ public:
ArchName == "thumbebv8a" || ArchName == "thumbebv8") {
Features["hwdiv"] = true;
Features["hwdiv-arm"] = true;
- } else if (CPU == "cortex-m3" || CPU == "cortex-m4") {
+ } else if (CPU == "cortex-m3" || CPU == "cortex-m4" || CPU == "cortex-m7") {
Features["hwdiv"] = true;
}
}
@@ -3764,29 +4023,38 @@ public:
Crypto = 0;
SoftFloat = SoftFloatABI = false;
HWDiv = 0;
- for (unsigned i = 0, e = Features.size(); i != e; ++i) {
- if (Features[i] == "+soft-float")
+
+ for (const auto &Feature : Features) {
+ if (Feature == "+soft-float") {
SoftFloat = true;
- else if (Features[i] == "+soft-float-abi")
+ } else if (Feature == "+soft-float-abi") {
SoftFloatABI = true;
- else if (Features[i] == "+vfp2")
+ } else if (Feature == "+vfp2") {
FPU |= VFP2FPU;
- else if (Features[i] == "+vfp3")
+ HW_FP = HW_FP_SP | HW_FP_DP;
+ } else if (Feature == "+vfp3") {
FPU |= VFP3FPU;
- else if (Features[i] == "+vfp4")
+ HW_FP = HW_FP_SP | HW_FP_DP;
+ } else if (Feature == "+vfp4") {
FPU |= VFP4FPU;
- else if (Features[i] == "+fp-armv8")
+ HW_FP = HW_FP_SP | HW_FP_DP | HW_FP_HP;
+ } else if (Feature == "+fp-armv8") {
FPU |= FPARMV8;
- else if (Features[i] == "+neon")
+ HW_FP = HW_FP_SP | HW_FP_DP | HW_FP_HP;
+ } else if (Feature == "+neon") {
FPU |= NeonFPU;
- else if (Features[i] == "+hwdiv")
+ HW_FP = HW_FP_SP | HW_FP_DP;
+ } else if (Feature == "+hwdiv") {
HWDiv |= HWDivThumb;
- else if (Features[i] == "+hwdiv-arm")
+ } else if (Feature == "+hwdiv-arm") {
HWDiv |= HWDivARM;
- else if (Features[i] == "+crc")
+ } else if (Feature == "+crc") {
CRC = 1;
- else if (Features[i] == "+crypto")
+ } else if (Feature == "+crypto") {
Crypto = 1;
+ } else if (Feature == "+fp-only-sp") {
+ HW_FP &= ~HW_FP_DP;
+ }
}
if (!(FPU & NeonFPU) && FPMath == FP_Neon) {
@@ -3800,13 +4068,13 @@ public:
Features.push_back("-neonfp");
// Remove front-end specific options which the backend handles differently.
- std::vector<std::string>::iterator it;
- it = std::find(Features.begin(), Features.end(), "+soft-float");
- if (it != Features.end())
- Features.erase(it);
- it = std::find(Features.begin(), Features.end(), "+soft-float-abi");
- if (it != Features.end())
- Features.erase(it);
+ const StringRef FrontEndFeatures[] = { "+soft-float", "+soft-float-abi" };
+ for (const auto &FEFeature : FrontEndFeatures) {
+ auto Feature = std::find(Features.begin(), Features.end(), FEFeature);
+ if (Feature != Features.end())
+ Features.erase(Feature);
+ }
+
return true;
}
@@ -3822,40 +4090,43 @@ public:
}
// FIXME: Should we actually have some table instead of these switches?
static const char *getCPUDefineSuffix(StringRef Name) {
- return llvm::StringSwitch<const char*>(Name)
- .Cases("arm8", "arm810", "4")
- .Cases("strongarm", "strongarm110", "strongarm1100", "strongarm1110", "4")
- .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "arm720t", "arm9", "4T")
- .Cases("arm9tdmi", "arm920", "arm920t", "arm922t", "arm940t", "4T")
- .Case("ep9312", "4T")
- .Cases("arm10tdmi", "arm1020t", "5T")
- .Cases("arm9e", "arm946e-s", "arm966e-s", "arm968e-s", "5TE")
- .Case("arm926ej-s", "5TEJ")
- .Cases("arm10e", "arm1020e", "arm1022e", "5TE")
- .Cases("xscale", "iwmmxt", "5TE")
- .Case("arm1136j-s", "6J")
- .Cases("arm1176jz-s", "arm1176jzf-s", "6ZK")
- .Cases("arm1136jf-s", "mpcorenovfp", "mpcore", "6K")
- .Cases("arm1156t2-s", "arm1156t2f-s", "6T2")
- .Cases("cortex-a5", "cortex-a7", "cortex-a8", "cortex-a9-mp", "7A")
- .Cases("cortex-a9", "cortex-a12", "cortex-a15", "krait", "7A")
- .Cases("cortex-r4", "cortex-r5", "7R")
- .Case("swift", "7S")
- .Case("cyclone", "8A")
- .Case("cortex-m3", "7M")
- .Case("cortex-m4", "7EM")
- .Case("cortex-m0", "6M")
- .Cases("cortex-a53", "cortex-a57", "8A")
- .Default(nullptr);
+ return llvm::StringSwitch<const char *>(Name)
+ .Cases("arm8", "arm810", "4")
+ .Cases("strongarm", "strongarm110", "strongarm1100", "strongarm1110",
+ "4")
+ .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "arm720t", "arm9", "4T")
+ .Cases("arm9tdmi", "arm920", "arm920t", "arm922t", "arm940t", "4T")
+ .Case("ep9312", "4T")
+ .Cases("arm10tdmi", "arm1020t", "5T")
+ .Cases("arm9e", "arm946e-s", "arm966e-s", "arm968e-s", "5TE")
+ .Case("arm926ej-s", "5TEJ")
+ .Cases("arm10e", "arm1020e", "arm1022e", "5TE")
+ .Cases("xscale", "iwmmxt", "5TE")
+ .Case("arm1136j-s", "6J")
+ .Cases("arm1176jz-s", "arm1176jzf-s", "6ZK")
+ .Cases("arm1136jf-s", "mpcorenovfp", "mpcore", "6K")
+ .Cases("arm1156t2-s", "arm1156t2f-s", "6T2")
+ .Cases("cortex-a5", "cortex-a7", "cortex-a8", "7A")
+ .Cases("cortex-a9", "cortex-a12", "cortex-a15", "cortex-a17", "krait",
+ "7A")
+ .Cases("cortex-r4", "cortex-r5", "7R")
+ .Case("swift", "7S")
+ .Case("cyclone", "8A")
+ .Case("cortex-m3", "7M")
+ .Cases("cortex-m4", "cortex-m7", "7EM")
+ .Case("cortex-m0", "6M")
+ .Cases("cortex-a53", "cortex-a57", "8A")
+ .Default(nullptr);
}
static const char *getCPUProfile(StringRef Name) {
- return llvm::StringSwitch<const char*>(Name)
- .Cases("cortex-a5", "cortex-a7", "cortex-a8", "A")
- .Cases("cortex-a9", "cortex-a12", "cortex-a15", "krait", "A")
- .Cases("cortex-a53", "cortex-a57", "A")
- .Cases("cortex-m3", "cortex-m4", "cortex-m0", "M")
- .Cases("cortex-r4", "cortex-r5", "R")
- .Default("");
+ return llvm::StringSwitch<const char *>(Name)
+ .Cases("cortex-a5", "cortex-a7", "cortex-a8", "A")
+ .Cases("cortex-a9", "cortex-a12", "cortex-a15", "cortex-a17", "krait",
+ "A")
+ .Cases("cortex-a53", "cortex-a57", "A")
+ .Cases("cortex-m3", "cortex-m4", "cortex-m0", "cortex-m7", "M")
+ .Cases("cortex-r4", "cortex-r5", "R")
+ .Default("");
}
bool setCPU(const std::string &Name) override {
if (!getCPUDefineSuffix(Name))
@@ -3895,9 +4166,8 @@ public:
StringRef CPUArch = getCPUDefineSuffix(CPU);
unsigned int CPUArchVer;
- if(CPUArch.substr(0, 1).getAsInteger<unsigned int>(10, CPUArchVer)) {
+ if (CPUArch.substr(0, 1).getAsInteger<unsigned int>(10, CPUArchVer))
llvm_unreachable("Invalid char for architecture version number");
- }
Builder.defineMacro("__ARM_ARCH_" + CPUArch + "__");
// ACLE 6.4.1 ARM/Thumb instruction set architecture
@@ -3906,6 +4176,10 @@ public:
// __ARM_ARCH is defined as an integer value indicating the current ARM ISA
Builder.defineMacro("__ARM_ARCH", CPUArch.substr(0, 1));
+ if (CPUArch[0] >= '8') {
+ Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN");
+ Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING");
+ }
// __ARM_ARCH_ISA_ARM is defined to 1 if the core supports the ARM ISA. It
// is not defined for the M-profile.
@@ -3931,6 +4205,10 @@ public:
if (!CPUProfile.empty())
Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + CPUProfile + "'");
+ // ACLE 6.5.1 Hardware Floating Point
+ if (HW_FP)
+ Builder.defineMacro("__ARM_FP", "0x" + llvm::utohexstr(HW_FP));
+
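A brief, hedged example of how ACLE-style source would consume this macro; the bit values follow the HW_FP_HP/SP/DP enum added earlier in this class (half = 0x2, single = 0x4, double = 0x8):

    #if defined(__ARM_FP) && (__ARM_FP & 0x8)
      /* double-precision hardware FP available (e.g. +vfp3, +vfp4, +fp-armv8) */
    #elif defined(__ARM_FP) && (__ARM_FP & 0x4)
      /* single-precision only, e.g. when +fp-only-sp clears the DP bit */
    #endif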
// ACLE predefines.
Builder.defineMacro("__ARM_ACLE", "200");
@@ -4008,6 +4286,13 @@ public:
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
}
+
+ bool is5EOrAbove = (CPUArchVer >= 6 ||
+ (CPUArchVer == 5 &&
+ CPUArch.find('E') != StringRef::npos));
+ bool is32Bit = (!IsThumb || supportsThumb2(ArchName, CPUArch, CPUArchVer));
+ if (is5EOrAbove && is32Bit && (CPUProfile != "M" || CPUArch == "7EM"))
+ Builder.defineMacro("__ARM_FEATURE_DSP");
}
void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const override {
@@ -4032,6 +4317,13 @@ public:
case 'P': // VFP Floating point register double precision
Info.setAllowsRegister();
return true;
+ case 'I':
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ // FIXME
+ return true;
case 'Q': // A memory address that is a single base register.
Info.setAllowsMemory();
return true;
@@ -4068,8 +4360,9 @@ public:
}
return R;
}
- bool validateConstraintModifier(StringRef Constraint, const char Modifier,
- unsigned Size) const override {
+ bool
+ validateConstraintModifier(StringRef Constraint, char Modifier, unsigned Size,
+ std::string &SuggestedModifier) const override {
bool isOutput = (Constraint[0] == '=');
bool isInOut = (Constraint[0] == '+');
@@ -4297,7 +4590,8 @@ public:
: DarwinTargetInfo<ARMleTargetInfo>(Triple) {
HasAlignMac68kSupport = true;
// iOS always has 64-bit atomic instructions.
- // FIXME: This should be based off of the target features in ARMleTargetInfo.
+ // FIXME: This should be based off of the target features in
+ // ARMleTargetInfo.
MaxAtomicInlineWidth = 64;
// Darwin on iOS uses a variant of the ARM C++ ABI.
@@ -4361,7 +4655,7 @@ public:
}
StringRef getABI() const override { return ABI; }
- virtual bool setABI(const std::string &Name) {
+ bool setABI(const std::string &Name) override {
if (Name != "aapcs" && Name != "darwinpcs")
return false;
@@ -4369,7 +4663,7 @@ public:
return true;
}
- virtual bool setCPU(const std::string &Name) {
+ bool setCPU(const std::string &Name) override {
bool CPUKnown = llvm::StringSwitch<bool>(Name)
.Case("generic", true)
.Cases("cortex-a53", "cortex-a57", true)
@@ -4379,7 +4673,7 @@ public:
}
virtual void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
+ MacroBuilder &Builder) const override {
// Target identification.
Builder.defineMacro("__aarch64__");
@@ -4400,6 +4694,10 @@ public:
Builder.defineMacro("__ARM_FEATURE_CLZ");
Builder.defineMacro("__ARM_FEATURE_FMA");
Builder.defineMacro("__ARM_FEATURE_DIV");
+ Builder.defineMacro("__ARM_FEATURE_IDIV"); // As specified in ACLE
+ Builder.defineMacro("__ARM_FEATURE_DIV"); // For backwards compatibility
+ Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN");
+ Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING");
Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
@@ -4413,7 +4711,7 @@ public:
if (Opts.FastMath || Opts.FiniteMathOnly)
Builder.defineMacro("__ARM_FP_FAST");
- if ((Opts.C99 || Opts.C11) && !Opts.Freestanding)
+ if (Opts.C99 && !Opts.Freestanding)
Builder.defineMacro("__ARM_FP_FENV_ROUNDING");
Builder.defineMacro("__ARM_SIZEOF_WCHAR_T", Opts.ShortWChar ? "2" : "4");
@@ -4435,12 +4733,12 @@ public:
}
virtual void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const {
+ unsigned &NumRecords) const override {
Records = BuiltinInfo;
NumRecords = clang::AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin;
}
- virtual bool hasFeature(StringRef Feature) const {
+ bool hasFeature(StringRef Feature) const override {
return Feature == "aarch64" ||
Feature == "arm64" ||
(Feature == "neon" && FPU == NeonMode);
@@ -4465,19 +4763,20 @@ public:
return true;
}
- virtual bool isCLZForZeroUndef() const { return false; }
+ bool isCLZForZeroUndef() const override { return false; }
- virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::AArch64ABIBuiltinVaList;
}
virtual void getGCCRegNames(const char *const *&Names,
- unsigned &NumNames) const;
+ unsigned &NumNames) const override;
virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const;
+ unsigned &NumAliases) const override;
- virtual bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &Info) const {
+ virtual bool
+ validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
switch (*Name) {
default:
return false;
@@ -4500,11 +4799,11 @@ public:
Info.setAllowsRegister();
return true;
case 'U':
- // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes, whatever they may be
- // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
- // Usa: An absolute symbolic address
- // Ush: The high part (bits 32:12) of a pc-relative symbolic address
- llvm_unreachable("FIXME: Unimplemented support for bizarre constraints");
+ // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
+ // Utf: A memory address suitable for ldp/stp in TF mode.
+ // Usa: An absolute symbolic address.
+ // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
+ llvm_unreachable("FIXME: Unimplemented support for U* constraints.");
case 'z': // Zero register, wzr or xzr
Info.setAllowsRegister();
return true;
@@ -4515,9 +4814,40 @@ public:
return false;
}
- virtual const char *getClobbers() const { return ""; }
+ bool
+ validateConstraintModifier(StringRef Constraint, char Modifier, unsigned Size,
+ std::string &SuggestedModifier) const override {
+ // Strip off constraint modifiers.
+ while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
+ Constraint = Constraint.substr(1);
+
+ switch (Constraint[0]) {
+ default:
+ return true;
+ case 'z':
+ case 'r': {
+ switch (Modifier) {
+ case 'x':
+ case 'w':
+ // For now assume that the person knows what they're
+ // doing with the modifier.
+ return true;
+ default:
+ // By default an 'r' constraint will be in the 'x'
+ // registers.
+ if (Size == 64)
+ return true;
+
+ SuggestedModifier = "w";
+ return false;
+ }
+ }
+ }
+ }
+
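To make the suggestion concrete, a hedged example of assumed user code: a 32-bit operand bound to an 'r' constraint should be referenced through the 'w' modifier, which is exactly what SuggestedModifier now proposes when Size is not 64:

    int add_one(int x) {
      int r;
      // %w0/%w1 select 32-bit w-registers; plain %0/%1 would use x-registers.
      __asm__("add %w0, %w1, #1" : "=r"(r) : "r"(x));
      return r;
    }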
+ const char *getClobbers() const override { return ""; }
- int getEHDataRegisterNumber(unsigned RegNo) const {
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
if (RegNo == 0)
return 0;
if (RegNo == 1)
@@ -4652,7 +4982,7 @@ public:
TheCXXABI.set(TargetCXXABI::iOS64);
}
- virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::CharPtrBuiltinVaList;
}
};
@@ -4857,6 +5187,16 @@ public:
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override {
// FIXME: Implement!
+ switch (*Name) {
+ case 'I': // Signed 13-bit constant
+ case 'J': // Zero
+ case 'K': // 32-bit constant with the low 12 bits clear
+ case 'L': // A constant in the range supported by movcc (11-bit signed imm)
+ case 'M': // A constant in the range supported by movrcc (19-bit signed imm)
+ case 'N': // Same as 'K' but zext (required for SIMode)
+ case 'O': // The constant 4096
+ return true;
+ }
return false;
}
const char *getClobbers() const override {
@@ -4962,10 +5302,8 @@ public:
SparcTargetInfo::getTargetDefines(Opts, Builder);
Builder.defineMacro("__sparcv9");
Builder.defineMacro("__arch64__");
- // Solaris and its derivative AuroraUX don't need these variants, but the
- // BSDs do.
- if (getTriple().getOS() != llvm::Triple::Solaris &&
- getTriple().getOS() != llvm::Triple::AuroraUX) {
+ // Solaris doesn't need these variants, but the BSDs do.
+ if (getTriple().getOS() != llvm::Triple::Solaris) {
Builder.defineMacro("__sparc64__");
Builder.defineMacro("__sparc_v9__");
Builder.defineMacro("__sparcv9__");
@@ -4992,14 +5330,6 @@ public:
} // end anonymous namespace.
namespace {
-class AuroraUXSparcV8TargetInfo : public AuroraUXTargetInfo<SparcV8TargetInfo> {
-public:
- AuroraUXSparcV8TargetInfo(const llvm::Triple &Triple)
- : AuroraUXTargetInfo<SparcV8TargetInfo>(Triple) {
- SizeType = UnsignedInt;
- PtrDiffType = SignedInt;
- }
-};
class SolarisSparcV8TargetInfo : public SolarisTargetInfo<SparcV8TargetInfo> {
public:
SolarisSparcV8TargetInfo(const llvm::Triple &Triple)
@@ -5131,7 +5461,7 @@ namespace {
IntPtrType = SignedInt;
PtrDiffType = SignedInt;
SigAtomicType = SignedLong;
- DescriptionString = "e-m:e-p:16:16-i32:16:32-n8:16";
+ DescriptionString = "e-m:e-p:16:16-i32:16:32-a:16-n8:16";
}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
@@ -5156,8 +5486,16 @@ namespace {
Aliases = nullptr;
NumAliases = 0;
}
- bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &info) const override {
+ bool
+ validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
+ // FIXME: implement
+ switch (*Name) {
+ case 'K': // the constant 1
+ case 'L': // constant -1^20 .. 1^19
+ case 'M': // constant 1-4:
+ return true;
+ }
// No target constraints for now.
return false;
}
@@ -5197,6 +5535,8 @@ namespace {
3, // opencl_global
4, // opencl_local
5, // opencl_constant
+ // FIXME: generic has to be added to the target
+ 0, // opencl_generic
0, // cuda_device
0, // cuda_constant
0 // cuda_shared
@@ -5443,7 +5783,6 @@ public:
switch (*Name) {
default:
return false;
-
case 'r': // CPU registers.
case 'd': // Equivalent to "r" unless generating MIPS16 code.
case 'y': // Equivalent to "r", backward compatibility only.
@@ -5453,6 +5792,15 @@ public:
case 'x': // hilo register pair
Info.setAllowsRegister();
return true;
+ case 'I': // Signed 16-bit constant
+ case 'J': // Integer 0
+ case 'K': // Unsigned 16-bit constant
+ case 'L': // Signed 32-bit constant, lower 16-bit zeros (for lui)
+ case 'M': // Constants not loadable via lui, addiu, or ori
+ case 'N': // Constant -1 to -65535
+ case 'O': // A signed 15-bit constant
+ case 'P': // A constant between 1 and 65535
+ return true;
case 'R': // An address that can be used in a non-macro load or store
Info.setAllowsMemory();
return true;
@@ -5460,8 +5808,28 @@ public:
}
const char *getClobbers() const override {
- // FIXME: Implement!
- return "";
+ // In GCC, $1 is not widely used in generated code (it's used only in a few
+ // specific situations), so there is no real need for users to add it to
+ // the clobbers list if they want to use it in their inline assembly code.
+ //
+ // In LLVM, $1 is treated as a normal GPR and is always allocatable during
+ // code generation, so using it in inline assembly without adding it to the
+ // clobbers list can cause conflicts between the inline assembly code and
+ // the surrounding generated code.
+ //
+ // Another problem is that LLVM is allowed to choose $1 for inline assembly
+ // operands, which will conflict with the ".set at" assembler option (which
+ // we use only for inline assembly, in order to maintain compatibility with
+ // GCC) and will also conflict with the user's usage of $1.
+ //
+ // The easiest way to avoid these conflicts and keep $1 as an allocatable
+ // register for generated code is to automatically clobber $1 for all inline
+ // assembly code.
+ //
+ // FIXME: We should automatically clobber $1 only for inline assembly code
+ // which actually uses it. This would allow LLVM to use $1 for inline
+ // assembly operands if the user's assembly code doesn't use it.
+ return "~{$1}";
}
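For context, a hedged sketch of the kind of user assembly the comment above describes (the function is an assumption for illustration); with the implicit ~{$1} clobber, the register allocator no longer keeps a live value in $1/$at across such a statement:

    int scale_by_two(int x) {
      int r;
      __asm__(".set noat\n\t"
              "move  $1, %1\n\t"     // user code touches $1 ($at) directly
              "addu  %0, $1, $1\n\t"
              ".set at"
              : "=r"(r)
              : "r"(x));             // $1 is now clobbered implicitly
      return r;
    }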
bool handleTargetFeatures(std::vector<std::string> &Features,
@@ -5863,10 +6231,65 @@ void PNaClTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
} // end anonymous namespace.
namespace {
+class Le64TargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+
+public:
+ Le64TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ BigEndian = false;
+ NoAsmVariants = true;
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ DescriptionString =
+ "e-m:e-v128:32-v16:16-v32:32-v96:32-n8:16:32:64-S128";
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "unix", Opts);
+ defineCPUMacros(Builder, "le64", /*Tuning=*/false);
+ Builder.defineMacro("__ELF__");
+ }
+ void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const override {
+ Records = BuiltinInfo;
+ NumRecords = clang::Le64::LastTSBuiltin - Builtin::FirstTSBuiltin;
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::PNaClABIBuiltinVaList;
+ }
+ const char *getClobbers() const override { return ""; }
+ void getGCCRegNames(const char *const *&Names,
+ unsigned &NumNames) const override {
+ Names = nullptr;
+ NumNames = 0;
+ }
+ void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const override {
+ Aliases = nullptr;
+ NumAliases = 0;
+ }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ return false;
+ }
+
+ bool hasProtectedVisibility() const override { return false; }
+};
+} // end anonymous namespace.
+
+const Builtin::Info Le64TargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsLe64.def"
+};
+
+namespace {
static const unsigned SPIRAddrSpaceMap[] = {
1, // opencl_global
3, // opencl_local
2, // opencl_constant
+ 4, // opencl_generic
0, // cuda_device
0, // cuda_constant
0 // cuda_shared
@@ -5902,8 +6325,9 @@ namespace {
}
void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const override {}
- bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &info) const override {
+ bool
+ validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
return true;
}
void getGCCRegAliases(const GCCRegAlias *&Aliases,
@@ -6031,11 +6455,12 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return new HexagonTargetInfo(Triple);
case llvm::Triple::aarch64:
- case llvm::Triple::arm64:
if (Triple.isOSDarwin())
return new DarwinAArch64TargetInfo(Triple);
switch (os) {
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<AArch64leTargetInfo>(Triple);
case llvm::Triple::Linux:
return new LinuxTargetInfo<AArch64leTargetInfo>(Triple);
case llvm::Triple::NetBSD:
@@ -6045,8 +6470,9 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
}
case llvm::Triple::aarch64_be:
- case llvm::Triple::arm64_be:
switch (os) {
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<AArch64beTargetInfo>(Triple);
case llvm::Triple::Linux:
return new LinuxTargetInfo<AArch64beTargetInfo>(Triple);
case llvm::Triple::NetBSD:
@@ -6185,6 +6611,9 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return nullptr;
}
+ case llvm::Triple::le64:
+ return new Le64TargetInfo(Triple);
+
case llvm::Triple::ppc:
if (Triple.isOSDarwin())
return new DarwinPPC32TargetInfo(Triple);
@@ -6232,6 +6661,7 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
case llvm::Triple::nvptx64:
return new NVPTX64TargetInfo(Triple);
+ case llvm::Triple::amdgcn:
case llvm::Triple::r600:
return new R600TargetInfo(Triple);
@@ -6239,8 +6669,6 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
switch (os) {
case llvm::Triple::Linux:
return new LinuxTargetInfo<SparcV8TargetInfo>(Triple);
- case llvm::Triple::AuroraUX:
- return new AuroraUXSparcV8TargetInfo(Triple);
case llvm::Triple::Solaris:
return new SolarisSparcV8TargetInfo(Triple);
case llvm::Triple::NetBSD:
@@ -6257,8 +6685,6 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
switch (os) {
case llvm::Triple::Linux:
return new LinuxTargetInfo<SparcV9TargetInfo>(Triple);
- case llvm::Triple::AuroraUX:
- return new AuroraUXTargetInfo<SparcV9TargetInfo>(Triple);
case llvm::Triple::Solaris:
return new SolarisTargetInfo<SparcV9TargetInfo>(Triple);
case llvm::Triple::NetBSD:
@@ -6287,8 +6713,6 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return new DarwinI386TargetInfo(Triple);
switch (os) {
- case llvm::Triple::AuroraUX:
- return new AuroraUXTargetInfo<X86_32TargetInfo>(Triple);
case llvm::Triple::Linux:
return new LinuxTargetInfo<X86_32TargetInfo>(Triple);
case llvm::Triple::DragonFly:
@@ -6335,8 +6759,6 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return new DarwinX86_64TargetInfo(Triple);
switch (os) {
- case llvm::Triple::AuroraUX:
- return new AuroraUXTargetInfo<X86_64TargetInfo>(Triple);
case llvm::Triple::Linux:
return new LinuxTargetInfo<X86_64TargetInfo>(Triple);
case llvm::Triple::DragonFly:
diff --git a/lib/Basic/Version.cpp b/lib/Basic/Version.cpp
index 87ecac17649b..9e0b91a84894 100644
--- a/lib/Basic/Version.cpp
+++ b/lib/Basic/Version.cpp
@@ -36,7 +36,7 @@ std::string getClangRepositoryPath() {
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
- StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_350/final/lib/Basic/Version.cpp $");
+ StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_360/rc1/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));
diff --git a/lib/Basic/VersionTuple.cpp b/lib/Basic/VersionTuple.cpp
index 8b781ab0a304..aa43ae298e23 100644
--- a/lib/Basic/VersionTuple.cpp
+++ b/lib/Basic/VersionTuple.cpp
@@ -29,9 +29,9 @@ raw_ostream& clang::operator<<(raw_ostream &Out,
const VersionTuple &V) {
Out << V.getMajor();
if (Optional<unsigned> Minor = V.getMinor())
- Out << '.' << *Minor;
+ Out << (V.usesUnderscores() ? '_' : '.') << *Minor;
if (Optional<unsigned> Subminor = V.getSubminor())
- Out << '.' << *Subminor;
+ Out << (V.usesUnderscores() ? '_' : '.') << *Subminor;
return Out;
}
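A minimal usage sketch, assuming VersionTuple's constructors accept the UsesUnderscores flag alongside the components (that part is not shown in this hunk); with the change above, the underscore spelling used by availability markup is preserved on output:

    void print_version() {
      clang::VersionTuple V(10, 9, 0, /*UsesUnderscores=*/true);  // assumed ctor
      llvm::outs() << V << "\n";   // prints "10_9_0" rather than "10.9.0"
    }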
diff --git a/lib/Basic/VirtualFileSystem.cpp b/lib/Basic/VirtualFileSystem.cpp
index a5c83b88af50..c89195e9326c 100644
--- a/lib/Basic/VirtualFileSystem.cpp
+++ b/lib/Basic/VirtualFileSystem.cpp
@@ -11,10 +11,10 @@
#include "clang/Basic/VirtualFileSystem.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
@@ -67,16 +67,14 @@ File::~File() {}
FileSystem::~FileSystem() {}
-std::error_code FileSystem::getBufferForFile(
- const llvm::Twine &Name, std::unique_ptr<MemoryBuffer> &Result,
- int64_t FileSize, bool RequiresNullTerminator, bool IsVolatile) {
- std::unique_ptr<File> F;
- if (std::error_code EC = openFileForRead(Name, F))
- return EC;
+ErrorOr<std::unique_ptr<MemoryBuffer>>
+FileSystem::getBufferForFile(const llvm::Twine &Name, int64_t FileSize,
+ bool RequiresNullTerminator, bool IsVolatile) {
+ auto F = openFileForRead(Name);
+ if (!F)
+ return F.getError();
- std::error_code EC =
- F->getBuffer(Name, Result, FileSize, RequiresNullTerminator, IsVolatile);
- return EC;
+ return (*F)->getBuffer(Name, FileSize, RequiresNullTerminator, IsVolatile);
}
//===-----------------------------------------------------------------------===/
@@ -96,11 +94,10 @@ class RealFile : public File {
public:
~RealFile();
ErrorOr<Status> status() override;
- std::error_code getBuffer(const Twine &Name,
- std::unique_ptr<MemoryBuffer> &Result,
- int64_t FileSize = -1,
- bool RequiresNullTerminator = true,
- bool IsVolatile = false) override;
+ ErrorOr<std::unique_ptr<MemoryBuffer>>
+ getBuffer(const Twine &Name, int64_t FileSize = -1,
+ bool RequiresNullTerminator = true,
+ bool IsVolatile = false) override;
std::error_code close() override;
void setName(StringRef Name) override;
};
@@ -120,19 +117,12 @@ ErrorOr<Status> RealFile::status() {
return S;
}
-std::error_code RealFile::getBuffer(const Twine &Name,
- std::unique_ptr<MemoryBuffer> &Result,
- int64_t FileSize,
- bool RequiresNullTerminator,
- bool IsVolatile) {
+ErrorOr<std::unique_ptr<MemoryBuffer>>
+RealFile::getBuffer(const Twine &Name, int64_t FileSize,
+ bool RequiresNullTerminator, bool IsVolatile) {
assert(FD != -1 && "cannot get buffer for closed file");
- ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
- MemoryBuffer::getOpenFile(FD, Name.str().c_str(), FileSize,
- RequiresNullTerminator, IsVolatile);
- if (std::error_code EC = BufferOrErr.getError())
- return EC;
- Result = std::move(BufferOrErr.get());
- return std::error_code();
+ return MemoryBuffer::getOpenFile(FD, Name, FileSize, RequiresNullTerminator,
+ IsVolatile);
}
// FIXME: This is terrible, we need this for ::close.
@@ -161,8 +151,7 @@ namespace {
class RealFileSystem : public FileSystem {
public:
ErrorOr<Status> status(const Twine &Path) override;
- std::error_code openFileForRead(const Twine &Path,
- std::unique_ptr<File> &Result) override;
+ ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;
};
} // end anonymous namespace
@@ -176,14 +165,14 @@ ErrorOr<Status> RealFileSystem::status(const Twine &Path) {
return Result;
}
-std::error_code RealFileSystem::openFileForRead(const Twine &Name,
- std::unique_ptr<File> &Result) {
+ErrorOr<std::unique_ptr<File>>
+RealFileSystem::openFileForRead(const Twine &Name) {
int FD;
if (std::error_code EC = sys::fs::openFileForRead(Name, FD))
return EC;
- Result.reset(new RealFile(FD));
+ std::unique_ptr<File> Result(new RealFile(FD));
Result->setName(Name.str());
- return std::error_code();
+ return std::move(Result);
}
IntrusiveRefCntPtr<FileSystem> vfs::getRealFileSystem() {
@@ -252,14 +241,13 @@ ErrorOr<Status> OverlayFileSystem::status(const Twine &Path) {
return make_error_code(llvm::errc::no_such_file_or_directory);
}
-std::error_code
-OverlayFileSystem::openFileForRead(const llvm::Twine &Path,
- std::unique_ptr<File> &Result) {
+ErrorOr<std::unique_ptr<File>>
+OverlayFileSystem::openFileForRead(const llvm::Twine &Path) {
// FIXME: handle symlinks that cross file systems
for (iterator I = overlays_begin(), E = overlays_end(); I != E; ++I) {
- std::error_code EC = (*I)->openFileForRead(Path, Result);
- if (!EC || EC != llvm::errc::no_such_file_or_directory)
- return EC;
+ auto Result = (*I)->openFileForRead(Path);
+ if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
+ return Result;
}
return make_error_code(llvm::errc::no_such_file_or_directory);
}
@@ -308,7 +296,7 @@ class OverlayFSDirIterImpl : public clang::vfs::detail::DirIterImpl {
}
CurrentEntry = *CurrentDirIter;
StringRef Name = llvm::sys::path::filename(CurrentEntry.getName());
- if (SeenNames.insert(Name))
+ if (SeenNames.insert(Name).second)
return EC; // name not seen before
}
llvm_unreachable("returned above");
@@ -514,16 +502,13 @@ public:
/// \brief Parses \p Buffer, which is expected to be in YAML format and
/// returns a virtual file system representing its contents.
- ///
- /// Takes ownership of \p Buffer.
- static VFSFromYAML *create(MemoryBuffer *Buffer,
+ static VFSFromYAML *create(std::unique_ptr<MemoryBuffer> Buffer,
SourceMgr::DiagHandlerTy DiagHandler,
void *DiagContext,
IntrusiveRefCntPtr<FileSystem> ExternalFS);
ErrorOr<Status> status(const Twine &Path) override;
- std::error_code openFileForRead(const Twine &Path,
- std::unique_ptr<File> &Result) override;
+ ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override{
ErrorOr<Entry *> E = lookupPath(Dir);
@@ -865,13 +850,13 @@ DirectoryEntry::~DirectoryEntry() { llvm::DeleteContainerPointers(Contents); }
VFSFromYAML::~VFSFromYAML() { llvm::DeleteContainerPointers(Roots); }
-VFSFromYAML *VFSFromYAML::create(MemoryBuffer *Buffer,
+VFSFromYAML *VFSFromYAML::create(std::unique_ptr<MemoryBuffer> Buffer,
SourceMgr::DiagHandlerTy DiagHandler,
void *DiagContext,
IntrusiveRefCntPtr<FileSystem> ExternalFS) {
SourceMgr SM;
- yaml::Stream Stream(Buffer, SM);
+ yaml::Stream Stream(Buffer->getMemBufferRef(), SM);
SM.setDiagHandler(DiagHandler, DiagContext);
yaml::document_iterator DI = Stream.begin();
@@ -971,9 +956,7 @@ ErrorOr<Status> VFSFromYAML::status(const Twine &Path) {
return status(Path, *Result);
}
-std::error_code
-VFSFromYAML::openFileForRead(const Twine &Path,
- std::unique_ptr<vfs::File> &Result) {
+ErrorOr<std::unique_ptr<File>> VFSFromYAML::openFileForRead(const Twine &Path) {
ErrorOr<Entry *> E = lookupPath(Path);
if (!E)
return E.getError();
@@ -982,21 +965,22 @@ VFSFromYAML::openFileForRead(const Twine &Path,
if (!F) // FIXME: errc::not_a_file?
return make_error_code(llvm::errc::invalid_argument);
- if (std::error_code EC =
- ExternalFS->openFileForRead(F->getExternalContentsPath(), Result))
- return EC;
+ auto Result = ExternalFS->openFileForRead(F->getExternalContentsPath());
+ if (!Result)
+ return Result;
if (!F->useExternalName(UseExternalNames))
- Result->setName(Path.str());
+ (*Result)->setName(Path.str());
- return std::error_code();
+ return Result;
}
IntrusiveRefCntPtr<FileSystem>
-vfs::getVFSFromYAML(MemoryBuffer *Buffer, SourceMgr::DiagHandlerTy DiagHandler,
- void *DiagContext,
+vfs::getVFSFromYAML(std::unique_ptr<MemoryBuffer> Buffer,
+ SourceMgr::DiagHandlerTy DiagHandler, void *DiagContext,
IntrusiveRefCntPtr<FileSystem> ExternalFS) {
- return VFSFromYAML::create(Buffer, DiagHandler, DiagContext, ExternalFS);
+ return VFSFromYAML::create(std::move(Buffer), DiagHandler, DiagContext,
+ ExternalFS);
}
UniqueID vfs::getNextVirtualUniqueID() {
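The VirtualFileSystem interface moves from out-parameters plus std::error_code to ErrorOr return values, and the YAML overlay now takes ownership of its buffer through std::unique_ptr. A caller-side sketch of the new shape; the function name here is hypothetical:

    #include "clang/Basic/VirtualFileSystem.h"
    #include "llvm/Support/ErrorOr.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/raw_ostream.h"
    #include <memory>

    static bool dumpFile(clang::vfs::FileSystem &FS, const llvm::Twine &Path) {
      // Result and error now travel together; no out-parameter to pre-declare.
      llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buf =
          FS.getBufferForFile(Path);
      if (!Buf)
        return false;  // Buf.getError() carries the std::error_code
      llvm::errs() << (*Buf)->getBuffer();
      return true;
    }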
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index d3ec46c4c4a1..7e7f7fa20679 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_ABIINFO_H
-#define CLANG_CODEGEN_ABIINFO_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
#include "clang/AST/Type.h"
#include "llvm/IR/CallingConv.h"
@@ -44,9 +44,12 @@ namespace clang {
CodeGen::CodeGenTypes &CGT;
protected:
llvm::CallingConv::ID RuntimeCC;
+ llvm::CallingConv::ID BuiltinCC;
public:
ABIInfo(CodeGen::CodeGenTypes &cgt)
- : CGT(cgt), RuntimeCC(llvm::CallingConv::C) {}
+ : CGT(cgt),
+ RuntimeCC(llvm::CallingConv::C),
+ BuiltinCC(llvm::CallingConv::C) {}
virtual ~ABIInfo();
@@ -62,6 +65,11 @@ namespace clang {
return RuntimeCC;
}
+ /// Return the calling convention to use for compiler builtins
+ llvm::CallingConv::ID getBuiltinCC() const {
+ return BuiltinCC;
+ }
+
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
/// EmitVAArg - Emit the target dependent code to load a value of
@@ -73,6 +81,15 @@ namespace clang {
// abstract this out.
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGen::CodeGenFunction &CGF) const = 0;
+
+ virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
+
+ virtual bool isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const;
+
+ bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ uint64_t &Members) const;
+
};
} // end namespace clang
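The new isHomogeneousAggregate* hooks let each target describe its HFA/HVA rules in one place. A hedged sketch of an override; the class name is invented, the float/double base types and the four-member cap are one common AAPCS-style choice rather than a statement about any in-tree target, and since ABIInfo.h is internal to lib/CodeGen this is illustrative only:

    #include "ABIInfo.h"  // in-tree private header

    class MyTargetABIInfo : public clang::ABIInfo {
    public:
      explicit MyTargetABIInfo(clang::CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

      // Accept only float and double as homogeneous-aggregate base elements.
      bool isHomogeneousAggregateBaseType(clang::QualType Ty) const override {
        if (const auto *BT = Ty->getAs<clang::BuiltinType>())
          return BT->getKind() == clang::BuiltinType::Float ||
                 BT->getKind() == clang::BuiltinType::Double;
        return false;
      }

      // Cap homogeneous aggregates at four members.
      bool isHomogeneousAggregateSmallEnough(const clang::Type *Base,
                                             uint64_t Members) const override {
        return Members <= 4;
      }

      // Stubs for the pure virtual interface.
      void computeInfo(clang::CodeGen::CGFunctionInfo &FI) const override {}
      llvm::Value *EmitVAArg(llvm::Value *VAListAddr, clang::QualType Ty,
                             clang::CodeGen::CodeGenFunction &CGF) const override {
        return nullptr;
      }
    };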
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index cec48f35a2e9..25ecec586244 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -33,11 +33,13 @@
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/ObjCARC.h"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/SymbolRewriter.h"
#include <memory>
using namespace clang;
using namespace llvm;
@@ -61,7 +63,7 @@ private:
PassManager *getCodeGenPasses() const {
if (!CodeGenPasses) {
CodeGenPasses = new PassManager();
- CodeGenPasses->add(new DataLayoutPass(TheModule));
+ CodeGenPasses->add(new DataLayoutPass());
if (TM)
TM->addAnalysisPasses(*CodeGenPasses);
}
@@ -71,7 +73,7 @@ private:
PassManager *getPerModulePasses() const {
if (!PerModulePasses) {
PerModulePasses = new PassManager();
- PerModulePasses->add(new DataLayoutPass(TheModule));
+ PerModulePasses->add(new DataLayoutPass());
if (TM)
TM->addAnalysisPasses(*PerModulePasses);
}
@@ -81,7 +83,7 @@ private:
FunctionPassManager *getPerFunctionPasses() const {
if (!PerFunctionPasses) {
PerFunctionPasses = new FunctionPassManager(TheModule);
- PerFunctionPasses->add(new DataLayoutPass(TheModule));
+ PerFunctionPasses->add(new DataLayoutPass());
if (TM)
TM->addAnalysisPasses(*PerFunctionPasses);
}
@@ -121,7 +123,7 @@ public:
delete PerModulePasses;
delete PerFunctionPasses;
if (CodeGenOpts.DisableFree)
- BuryPointer(TM.release());
+ BuryPointer(std::move(TM));
}
std::unique_ptr<TargetMachine> TM;
@@ -178,6 +180,14 @@ static void addBoundsCheckingPass(const PassManagerBuilder &Builder,
PM.add(createBoundsCheckingPass());
}
+static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ const PassManagerBuilderWrapper &BuilderWrapper =
+ static_cast<const PassManagerBuilderWrapper&>(Builder);
+ const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
+ PM.add(createSanitizerCoverageModulePass(CGOpts.SanitizeCoverage));
+}
+
static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
PassManagerBase &PM) {
PM.add(createAddressSanitizerFunctionPass());
@@ -213,8 +223,27 @@ static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder,
PassManagerBase &PM) {
const PassManagerBuilderWrapper &BuilderWrapper =
static_cast<const PassManagerBuilderWrapper&>(Builder);
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- PM.add(createDataFlowSanitizerPass(CGOpts.SanitizerBlacklistFile));
+ const LangOptions &LangOpts = BuilderWrapper.getLangOpts();
+ PM.add(createDataFlowSanitizerPass(LangOpts.SanitizerBlacklistFile));
+}
+
+static TargetLibraryInfo *createTLI(llvm::Triple &TargetTriple,
+ const CodeGenOptions &CodeGenOpts) {
+ TargetLibraryInfo *TLI = new TargetLibraryInfo(TargetTriple);
+ if (!CodeGenOpts.SimplifyLibCalls)
+ TLI->disableAllFunctions();
+ return TLI;
+}
+
+static void addSymbolRewriterPass(const CodeGenOptions &Opts,
+ PassManager *MPM) {
+ llvm::SymbolRewriter::RewriteDescriptorList DL;
+
+ llvm::SymbolRewriter::RewriteMapParser MapParser;
+ for (const auto &MapFile : Opts.RewriteMapFiles)
+ MapParser.parse(MapFile, &DL);
+
+ MPM->add(createRewriteSymbolsPass(DL));
}
void EmitAssemblyHelper::CreatePasses() {
@@ -238,6 +267,7 @@ void EmitAssemblyHelper::CreatePasses() {
PMBuilder.DisableTailCalls = CodeGenOpts.DisableTailCalls;
PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime;
PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
+ PMBuilder.MergeFunctions = CodeGenOpts.MergeFunctions;
PMBuilder.RerollLoops = CodeGenOpts.RerollLoops;
PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
@@ -257,35 +287,42 @@ void EmitAssemblyHelper::CreatePasses() {
addObjCARCOptPass);
}
- if (LangOpts.Sanitize.LocalBounds) {
+ if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds)) {
PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
addBoundsCheckingPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
addBoundsCheckingPass);
}
- if (LangOpts.Sanitize.Address) {
+ if (CodeGenOpts.SanitizeCoverage) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addSanitizerCoveragePass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addSanitizerCoveragePass);
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addAddressSanitizerPasses);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
addAddressSanitizerPasses);
}
- if (LangOpts.Sanitize.Memory) {
+ if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addMemorySanitizerPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
addMemorySanitizerPass);
}
- if (LangOpts.Sanitize.Thread) {
+ if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addThreadSanitizerPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
addThreadSanitizerPass);
}
- if (LangOpts.Sanitize.DataFlow) {
+ if (LangOpts.Sanitize.has(SanitizerKind::DataFlow)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addDataFlowSanitizerPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
@@ -294,9 +331,7 @@ void EmitAssemblyHelper::CreatePasses() {
// Figure out TargetLibraryInfo.
Triple TargetTriple(TheModule->getTargetTriple());
- PMBuilder.LibraryInfo = new TargetLibraryInfo(TargetTriple);
- if (!CodeGenOpts.SimplifyLibCalls)
- PMBuilder.LibraryInfo->disableAllFunctions();
+ PMBuilder.LibraryInfo = createTLI(TargetTriple, CodeGenOpts);
switch (Inlining) {
case CodeGenOptions::NoInlining: break;
@@ -323,6 +358,8 @@ void EmitAssemblyHelper::CreatePasses() {
// Set up the per-module pass manager.
PassManager *MPM = getPerModulePasses();
+ if (!CodeGenOpts.RewriteMapFiles.empty())
+ addSymbolRewriterPass(CodeGenOpts, MPM);
if (CodeGenOpts.VerifyModule)
MPM->add(createDebugInfoVerifierPass());
@@ -343,6 +380,12 @@ void EmitAssemblyHelper::CreatePasses() {
MPM->add(createStripSymbolsPass(true));
}
+ if (CodeGenOpts.ProfileInstrGenerate) {
+ InstrProfOptions Options;
+ Options.NoRedZone = CodeGenOpts.DisableRedZone;
+ MPM->add(createInstrProfilingPass(Options));
+ }
+
PMBuilder.populateModulePassManager(*MPM);
}
@@ -418,6 +461,11 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
llvm::TargetOptions Options;
+ Options.ThreadModel =
+ llvm::StringSwitch<llvm::ThreadModel::Model>(CodeGenOpts.ThreadModel)
+ .Case("posix", llvm::ThreadModel::POSIX)
+ .Case("single", llvm::ThreadModel::Single);
+
if (CodeGenOpts.DisableIntegratedAS)
Options.DisableIntegratedAS = true;
@@ -476,7 +524,9 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm;
Options.MCOptions.MCNoExecStack = CodeGenOpts.NoExecStack;
+ Options.MCOptions.MCFatalWarnings = CodeGenOpts.FatalWarnings;
Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose;
+ Options.MCOptions.ABIName = TargetOpts.ABI;
TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
FeaturesStr, Options,
@@ -493,10 +543,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
// Add LibraryInfo.
llvm::Triple TargetTriple(TheModule->getTargetTriple());
- TargetLibraryInfo *TLI = new TargetLibraryInfo(TargetTriple);
- if (!CodeGenOpts.SimplifyLibCalls)
- TLI->disableAllFunctions();
- PM->add(TLI);
+ PM->add(createTLI(TargetTriple, CodeGenOpts));
// Add Target specific analysis passes.
TM->addAnalysisPasses(*PM);
@@ -600,8 +647,9 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
// If an optional clang TargetInfo description string was passed in, use it to
// verify the LLVM TargetMachine's DataLayout.
if (AsmHelper.TM && !TDesc.empty()) {
- std::string DLDesc =
- AsmHelper.TM->getDataLayout()->getStringRepresentation();
+ std::string DLDesc = AsmHelper.TM->getSubtargetImpl()
+ ->getDataLayout()
+ ->getStringRepresentation();
if (DLDesc != TDesc) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "backend data layout '%0' does not match "
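Among the BackendUtil changes, the -mthread-model value chosen by the driver is now forwarded to the TargetMachine through a StringSwitch. A standalone sketch of that mapping; the .Default is an illustrative addition, since the patch itself relies on the driver having already rejected other values:

    #include "llvm/ADT/StringSwitch.h"
    #include "llvm/Target/TargetOptions.h"

    static llvm::ThreadModel::Model parseThreadModel(llvm::StringRef Name) {
      return llvm::StringSwitch<llvm::ThreadModel::Model>(Name)
          .Case("posix", llvm::ThreadModel::POSIX)
          .Case("single", llvm::ThreadModel::Single)
          .Default(llvm::ThreadModel::POSIX);
    }
    // parseThreadModel("single") == llvm::ThreadModel::Single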
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 89bde2ce20d9..daac174c8e0c 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -46,23 +46,26 @@ namespace {
ASTContext &C = CGF.getContext();
- uint64_t valueAlignInBits;
- std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);
+ uint64_t ValueAlignInBits;
+ uint64_t AtomicAlignInBits;
+ TypeInfo ValueTI = C.getTypeInfo(ValueTy);
+ ValueSizeInBits = ValueTI.Width;
+ ValueAlignInBits = ValueTI.Align;
- uint64_t atomicAlignInBits;
- std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);
+ TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
+ AtomicSizeInBits = AtomicTI.Width;
+ AtomicAlignInBits = AtomicTI.Align;
assert(ValueSizeInBits <= AtomicSizeInBits);
- assert(valueAlignInBits <= atomicAlignInBits);
+ assert(ValueAlignInBits <= AtomicAlignInBits);
- AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
- ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
+ AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
+ ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
if (lvalue.getAlignment().isZero())
lvalue.setAlignment(AtomicAlign);
- UseLibcall =
- (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
- AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
+ UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
+ AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
}
QualType getAtomicType() const { return AtomicTy; }
@@ -70,7 +73,7 @@ namespace {
CharUnits getAtomicAlignment() const { return AtomicAlign; }
CharUnits getValueAlignment() const { return ValueAlign; }
uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
- uint64_t getValueSizeInBits() const { return AtomicSizeInBits; }
+ uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
bool shouldUseLibcall() const { return UseLibcall; }
@@ -100,6 +103,12 @@ namespace {
AggValueSlot resultSlot,
SourceLocation loc) const;
+ /// \brief Converts an r-value to an integer value.
+ llvm::Value *convertRValueToInt(RValue RVal) const;
+
+ RValue convertIntToValue(llvm::Value *IntVal, AggValueSlot ResultSlot,
+ SourceLocation Loc) const;
+
/// Copy an atomic r-value into atomic-layout memory.
void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;
@@ -461,11 +470,19 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
- SourceLocation Loc) {
+ SourceLocation Loc, CharUnits SizeInChars) {
if (UseOptimizedLibcall) {
// Load value and pass it to the function directly.
unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
- Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
+ int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
+ ValTy =
+ CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
+ llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
+ SizeInBits)->getPointerTo();
+ Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
+ Align, CGF.getContext().getPointerType(ValTy),
+ Loc);
+ // Coerce the value into an appropriately sized integer type.
Args.add(RValue::get(Val), ValTy);
} else {
// Non-optimized functions always take a reference.
@@ -576,8 +593,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
break;
}
- if (!E->getType()->isVoidType() && !Dest)
- Dest = CreateMemTemp(E->getType(), ".atomicdst");
+ QualType RValTy = E->getType().getUnqualifiedType();
+
+ auto GetDest = [&] {
+ if (!RValTy->isVoidType() && !Dest) {
+ Dest = CreateMemTemp(RValTy, ".atomicdst");
+ }
+ return Dest;
+ };
// Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
if (UseLibcall) {
@@ -634,7 +657,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
HaveRetTy = true;
Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
Args.add(RValue::get(Order), getContext().IntTy);
Order = OrderFail;
break;
@@ -646,7 +669,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__atomic_exchange:
LibCallName = "__atomic_exchange";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// void __atomic_store(size_t size, void *mem, void *val, int order)
// void __atomic_store_N(T *mem, T val, int order)
@@ -657,7 +680,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
RetTy = getContext().VoidTy;
HaveRetTy = true;
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// void __atomic_load(size_t size, void *mem, void *return, int order)
// T __atomic_load_N(T *mem, int order)
@@ -671,35 +694,35 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__atomic_fetch_add:
LibCallName = "__atomic_fetch_add";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_and_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
LibCallName = "__atomic_fetch_and";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_or_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
LibCallName = "__atomic_fetch_or";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_sub_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
LibCallName = "__atomic_fetch_sub";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_xor_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
LibCallName = "__atomic_fetch_xor";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
default: return EmitUnsupportedRValue(E, "atomic library call");
}
@@ -711,29 +734,36 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
if (!HaveRetTy) {
if (UseOptimizedLibcall) {
// Value is returned directly.
- RetTy = MemTy;
+ // The function returns an appropriately sized integer type.
+ RetTy = getContext().getIntTypeForBitwidth(
+ getContext().toBits(sizeChars), /*Signed=*/false);
} else {
// Value is returned through parameter before the order.
RetTy = getContext().VoidTy;
- Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
- getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
}
}
// order is always the last parameter
Args.add(RValue::get(Order),
getContext().IntTy);
- const CGFunctionInfo &FuncInfo =
- CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
- FunctionType::ExtInfo(), RequiredArgs::All);
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
- llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
- RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
- if (!RetTy->isVoidType())
+ RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
+ // The value is returned directly from the libcall.
+ if (HaveRetTy && !RetTy->isVoidType())
return Res;
- if (E->getType()->isVoidType())
+ // The value is returned via an explicit out param.
+ if (RetTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
+ // The value is returned directly for optimized libcalls but the caller
+ // expects an out-param.
+ if (UseOptimizedLibcall) {
+ llvm::Value *ResVal = Res.getScalarVal();
+ llvm::StoreInst *StoreDest = Builder.CreateStore(
+ ResVal,
+ Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
+ StoreDest->setAlignment(Align);
+ }
+ return convertTempToRValue(Dest, RValTy, E->getExprLoc());
}
bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
@@ -743,13 +773,15 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
E->getOp() == AtomicExpr::AO__atomic_load ||
E->getOp() == AtomicExpr::AO__atomic_load_n;
- llvm::Type *IPtrTy =
- llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
- llvm::Value *OrigDest = Dest;
- Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
- if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
- if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
- if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
+ llvm::Type *ITy =
+ llvm::IntegerType::get(getLLVMContext(), Size * 8);
+ llvm::Value *OrigDest = GetDest();
+ Ptr = Builder.CreateBitCast(
+ Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
+ if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
+ if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
+ if (Dest && !E->isCmpXChg())
+ Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
@@ -786,9 +818,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// enforce that in general.
break;
}
- if (E->getType()->isVoidType())
+ if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
+ return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
// Long case, when Order isn't obviously constant.
@@ -854,9 +886,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Cleanup and return
Builder.SetInsertPoint(ContBB);
- if (E->getType()->isVoidType())
+ if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
+ return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
@@ -882,6 +914,45 @@ RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
return CGF.convertTempToRValue(addr, getValueType(), loc);
}
+RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
+ AggValueSlot ResultSlot,
+ SourceLocation Loc) const {
+ // Try to avoid going through memory in some easy cases.
+ assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
+ if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
+ auto *ValTy = CGF.ConvertTypeForMem(ValueTy);
+ if (ValTy->isIntegerTy()) {
+ assert(IntVal->getType() == ValTy && "Different integer types.");
+ return RValue::get(IntVal);
+ } else if (ValTy->isPointerTy())
+ return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
+ else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
+ return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
+ }
+
+ // Create a temporary. This needs to be big enough to hold the
+ // atomic integer.
+ llvm::Value *Temp;
+ bool TempIsVolatile = false;
+ CharUnits TempAlignment;
+ if (getEvaluationKind() == TEK_Aggregate) {
+ assert(!ResultSlot.isIgnored());
+ Temp = ResultSlot.getAddr();
+ TempAlignment = getValueAlignment();
+ TempIsVolatile = ResultSlot.isVolatile();
+ } else {
+ Temp = CGF.CreateMemTemp(getAtomicType(), "atomic-temp");
+ TempAlignment = getAtomicAlignment();
+ }
+
+ // Slam the integer into the temporary.
+ llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
+ CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
+ ->setVolatile(TempIsVolatile);
+
+ return convertTempToRValue(Temp, ResultSlot, Loc);
+}
+
/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
@@ -927,50 +998,12 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
if (src.getTBAAInfo())
CGM.DecorateInstruction(load, src.getTBAAInfo());
- // Okay, turn that back into the original value type.
- QualType valueType = atomics.getValueType();
- llvm::Value *result = load;
-
// If we're ignoring an aggregate return, don't do anything.
if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
return RValue::getAggregate(nullptr, false);
- // The easiest way to do this this is to go through memory, but we
- // try not to in some easy cases.
- if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
- llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
- if (isa<llvm::IntegerType>(resultTy)) {
- assert(result->getType() == resultTy);
- result = EmitFromMemory(result, valueType);
- } else if (isa<llvm::PointerType>(resultTy)) {
- result = Builder.CreateIntToPtr(result, resultTy);
- } else {
- result = Builder.CreateBitCast(result, resultTy);
- }
- return RValue::get(result);
- }
-
- // Create a temporary. This needs to be big enough to hold the
- // atomic integer.
- llvm::Value *temp;
- bool tempIsVolatile = false;
- CharUnits tempAlignment;
- if (atomics.getEvaluationKind() == TEK_Aggregate) {
- assert(!resultSlot.isIgnored());
- temp = resultSlot.getAddr();
- tempAlignment = atomics.getValueAlignment();
- tempIsVolatile = resultSlot.isVolatile();
- } else {
- temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
- tempAlignment = atomics.getAtomicAlignment();
- }
-
- // Slam the integer into the temporary.
- llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
- Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
- ->setVolatile(tempIsVolatile);
-
- return atomics.convertTempToRValue(temp, resultSlot, loc);
+ // Okay, turn that back into the original value type.
+ return atomics.convertIntToValue(load, resultSlot, loc);
}
@@ -1023,6 +1056,32 @@ llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
return temp;
}
+llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
+ // If we've got a scalar value of the right size, try to avoid going
+ // through memory.
+ if (RVal.isScalar() && !hasPadding()) {
+ llvm::Value *Value = RVal.getScalarVal();
+ if (isa<llvm::IntegerType>(Value->getType()))
+ return Value;
+ else {
+ llvm::IntegerType *InputIntTy =
+ llvm::IntegerType::get(CGF.getLLVMContext(), getValueSizeInBits());
+ if (isa<llvm::PointerType>(Value->getType()))
+ return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
+ else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
+ return CGF.Builder.CreateBitCast(Value, InputIntTy);
+ }
+ }
+ // Otherwise, we need to go through memory.
+ // Put the r-value in memory.
+ llvm::Value *Addr = materializeRValue(RVal);
+
+ // Cast the temporary to the atomic int type and pull a value out.
+ Addr = emitCastToAtomicIntPointer(Addr);
+ return CGF.Builder.CreateAlignedLoad(Addr,
+ getAtomicAlignment().getQuantity());
+}
+
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
@@ -1064,34 +1123,7 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
}
// Okay, we're doing this natively.
- llvm::Value *intValue;
-
- // If we've got a scalar value of the right size, try to avoid going
- // through memory.
- if (rvalue.isScalar() && !atomics.hasPadding()) {
- llvm::Value *value = rvalue.getScalarVal();
- if (isa<llvm::IntegerType>(value->getType())) {
- intValue = value;
- } else {
- llvm::IntegerType *inputIntTy =
- llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
- if (isa<llvm::PointerType>(value->getType())) {
- intValue = Builder.CreatePtrToInt(value, inputIntTy);
- } else {
- intValue = Builder.CreateBitCast(value, inputIntTy);
- }
- }
-
- // Otherwise, we need to go through memory.
- } else {
- // Put the r-value in memory.
- llvm::Value *addr = atomics.materializeRValue(rvalue);
-
- // Cast the temporary to the atomic int type and pull a value out.
- addr = atomics.emitCastToAtomicIntPointer(addr);
- intValue = Builder.CreateAlignedLoad(addr,
- atomics.getAtomicAlignment().getQuantity());
- }
+ llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
// Do the atomic store.
llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
@@ -1108,6 +1140,74 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
+/// Emit a compare-and-exchange op for atomic type.
+///
+std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
+ LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
+ llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
+ AggValueSlot Slot) {
+ // If this is an aggregate r-value, it should agree in type except
+ // maybe for address-space qualification.
+ assert(!Expected.isAggregate() ||
+ Expected.getAggregateAddr()->getType()->getPointerElementType() ==
+ Obj.getAddress()->getType()->getPointerElementType());
+ assert(!Desired.isAggregate() ||
+ Desired.getAggregateAddr()->getType()->getPointerElementType() ==
+ Obj.getAddress()->getType()->getPointerElementType());
+ AtomicInfo Atomics(*this, Obj);
+
+ if (Failure >= Success)
+ // Don't assert on undefined behavior.
+ Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
+
+ auto Alignment = Atomics.getValueAlignment();
+ // Check whether we should use a library call.
+ if (Atomics.shouldUseLibcall()) {
+ auto *ExpectedAddr = Atomics.materializeRValue(Expected);
+ // Produce a source address.
+ auto *DesiredAddr = Atomics.materializeRValue(Desired);
+ // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
+ // void *desired, int success, int failure);
+ CallArgList Args;
+ Args.add(RValue::get(Atomics.getAtomicSizeValue()),
+ getContext().getSizeType());
+ Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)),
+ getContext().IntTy);
+ Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)),
+ getContext().IntTy);
+ auto SuccessFailureRVal = emitAtomicLibcall(
+ *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
+ auto *PreviousVal =
+ Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
+ return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
+ }
+
+ // If we've got a scalar value of the right size, try to avoid going
+ // through memory.
+ auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
+ auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);
+
+ // Do the atomic store.
+ auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
+ auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
+ Success, Failure);
+ // Other decoration.
+ Inst->setVolatile(Obj.isVolatileQualified());
+ Inst->setWeak(IsWeak);
+
+ // Okay, turn that back into the original value type.
+ auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
+ auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
+ return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
+ RValue::get(SuccessFailureVal));
+}
+
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
AtomicInfo atomics(*this, dest);
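Source-level operations of the kind the atomic rework above lowers. Which path a given type takes is target-dependent: the oversized struct is only a plausible trigger for the generic __atomic_compare_exchange libcall, while the int case is lock-free on mainstream targets and becomes a native cmpxchg:

    // Compiles as C++ with Clang; the __atomic builtins need no headers.
    struct Big { long a[4]; };  // usually wider than the inline-atomic limit

    bool casBig(Big *Obj, Big *Expected, Big Desired) {
      return __atomic_compare_exchange(Obj, Expected, &Desired,
                                       /*weak=*/false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }

    bool casInt(int *Obj, int *Expected, int Desired) {
      return __atomic_compare_exchange_n(Obj, Expected, Desired,
                                         /*weak=*/false,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }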
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 72fde9dc55f1..b98460a9ddd8 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -545,6 +545,16 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// multiple of alignment.
for (SmallVectorImpl<BlockLayoutChunk>::iterator
li = layout.begin(), le = layout.end(); li != le; ++li) {
+ if (endAlign < li->Alignment) {
+ // The size may not be a multiple of the alignment. This can only happen
+ // with an over-aligned variable. We will add a padding field to make the
+ // size a multiple of the alignment.
+ CharUnits padding = li->Alignment - endAlign;
+ elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
+ padding.getQuantity()));
+ blockSize += padding;
+ endAlign = getLowBit(blockSize);
+ }
assert(endAlign >= li->Alignment);
li->setIndex(info, elementTypes.size());
elementTypes.push_back(li->Type);
@@ -782,9 +792,10 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// emission.
src = LocalDeclMap.lookup(variable);
if (!src) {
- DeclRefExpr declRef(const_cast<VarDecl *>(variable),
- /*refersToEnclosing*/ CI.isNested(), type,
- VK_LValue, SourceLocation());
+ DeclRefExpr declRef(
+ const_cast<VarDecl *>(variable),
+ /*RefersToEnclosingVariableOrCapture*/ CI.isNested(), type,
+ VK_LValue, SourceLocation());
src = EmitDeclRefLValue(&declRef).getAddress();
}
}
@@ -853,12 +864,15 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// We use one of these or the other depending on whether the
// reference is nested.
- DeclRefExpr declRef(const_cast<VarDecl*>(variable),
- /*refersToEnclosing*/ CI.isNested(), type,
- VK_LValue, SourceLocation());
+ DeclRefExpr declRef(const_cast<VarDecl *>(variable),
+ /*RefersToEnclosingVariableOrCapture*/ CI.isNested(),
+ type, VK_LValue, SourceLocation());
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
&declRef, VK_RValue);
+ // FIXME: Pass a specific location for the expr init so that the store is
+ // attributed to a reasonable location - otherwise it may be attributed to
+ // locations of subexpressions in the initialization.
EmitExprAsInit(&l2r, &blockFieldPseudoVar,
MakeAddrLValue(blockField, type, align),
/*captured by init*/ false);
@@ -905,7 +919,7 @@ llvm::Type *CodeGenModule::getBlockDescriptorType() {
// };
BlockDescriptorType =
llvm::StructType::create("struct.__block_descriptor",
- UnsignedLongTy, UnsignedLongTy, NULL);
+ UnsignedLongTy, UnsignedLongTy, nullptr);
// Now form a pointer to that.
BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType);
@@ -928,7 +942,7 @@ llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
GenericBlockLiteralType =
llvm::StructType::create("struct.__block_literal_generic",
VoidPtrTy, IntTy, IntTy, VoidPtrTy,
- BlockDescPtrTy, NULL);
+ BlockDescPtrTy, nullptr);
return GenericBlockLiteralType;
}
@@ -1093,6 +1107,8 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
CurGD = GD;
+
+ CurEHLocation = blockInfo.getBlockExpr()->getLocEnd();
BlockInfo = &blockInfo;
@@ -1162,7 +1178,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
Alloca->setAlignment(Align);
// Set the DebugLocation to empty, so the store is recognized as a
// frame setup instruction by llvm::DwarfDebug::beginFunction().
- NoLocation NL(*this, Builder);
+ ApplyDebugLocation NL(*this);
Builder.CreateAlignedStore(BlockPointer, Alloca, Align);
BlockPointerDbgLoc = Alloca;
}
@@ -1205,8 +1221,6 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
RegionCounter Cnt = getPGORegionCounter(blockDecl->getBody());
Cnt.beginRegion(Builder);
EmitStmt(blockDecl->getBody());
- PGO.emitInstrumentationData();
- PGO.destroyRegionCounters();
}
// Remember where we were...
@@ -1233,7 +1247,9 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
}
DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointerDbgLoc,
- Builder, blockInfo);
+ Builder, blockInfo,
+ entry_ptr == entry->end()
+ ? nullptr : entry_ptr);
}
}
// Recover location if it was changed in the above loop.
@@ -1313,9 +1329,9 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
false,
false);
// Create a scope with an artificial location for the body of this function.
- ArtificialLocation AL(*this, Builder);
+ ApplyDebugLocation NL(*this);
StartFunction(FD, C.VoidTy, Fn, FI, args);
- AL.Emit();
+ ArtificialLocation AL(*this);
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
@@ -1484,9 +1500,9 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
nullptr, SC_Static,
false, false);
// Create a scope with an artificial location for the body of this function.
- ArtificialLocation AL(*this, Builder);
+ ApplyDebugLocation NL(*this);
StartFunction(FD, C.VoidTy, Fn, FI, args);
- AL.Emit();
+ ArtificialLocation AL(*this);
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
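The padding logic added to computeBlockInfo above handles captures whose alignment exceeds the alignment of the running block size. A sketch of the kind of capture involved; this needs -fblocks, the consumer is a hypothetical declaration, and whether a particular capture really exercises the new padding depends on the target's block layout, so treat it as an assumption:

    typedef struct { double d; } __attribute__((aligned(64))) OverAligned;

    void consume(void (^blk)(void));  // hypothetical external consumer

    void captureOverAligned(void) {
      OverAligned v = {1.0};
      char tail = 2;                       // a lower-aligned capture alongside it
      consume(^{ (void)v; (void)tail; });  // copied captures are laid out in the block
    }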
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 0031e32c9daf..c4eed0d0e8eb 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGBLOCKS_H
-#define CLANG_CODEGEN_CGBLOCKS_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGBLOCKS_H
+#define LLVM_CLANG_LIB_CODEGEN_CGBLOCKS_H
#include "CGBuilder.h"
#include "CGCall.h"
diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h
index f113b970b7b7..72ba4faa3c7c 100644
--- a/lib/CodeGen/CGBuilder.h
+++ b/lib/CodeGen/CGBuilder.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGBUILDER_H
-#define CLANG_CODEGEN_CGBUILDER_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
+#define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
#include "llvm/IR/IRBuilder.h"
@@ -18,7 +18,7 @@ namespace CodeGen {
class CodeGenFunction;
/// \brief This is an IRBuilder insertion helper that forwards to
-/// CodeGenFunction::InsertHelper, which adds nesessary metadata to
+/// CodeGenFunction::InsertHelper, which adds necessary metadata to
/// instructions.
template <bool PreserveNames>
class CGBuilderInserter
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 4f68b347dbf0..635e34207de7 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -20,7 +20,9 @@
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
@@ -113,7 +115,8 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E,
- Instruction::BinaryOps Op) {
+ Instruction::BinaryOps Op,
+ bool Invert = false) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
@@ -138,36 +141,25 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
llvm::SequentiallyConsistent);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
+ if (Invert)
+ Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
+ llvm::ConstantInt::get(IntType, -1));
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
-/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
-/// which must be a scalar floating point type.
-static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
- const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
- assert(ValTyP && "isn't scalar fp type!");
-
- StringRef FnName;
- switch (ValTyP->getKind()) {
- default: llvm_unreachable("Isn't a scalar fp type!");
- case BuiltinType::Float: FnName = "fabsf"; break;
- case BuiltinType::Double: FnName = "fabs"; break;
- case BuiltinType::LongDouble: FnName = "fabsl"; break;
- }
-
- // The prototype is something that takes and returns whatever V's type is.
- llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
- false);
- llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
-
- return CGF.EmitNounwindRuntimeCall(Fn, V, "abs");
+/// EmitFAbs - Emit a call to @llvm.fabs().
+static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
+ Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
+ llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
+ Call->setDoesNotAccessMemory();
+ return Call;
}
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
const CallExpr *E, llvm::Value *calleeValue) {
- return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E->getLocStart(),
- ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
+ return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E,
+ ReturnValueSlot(), Fn);
}
/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
@@ -195,7 +187,8 @@ static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
}
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
- unsigned BuiltinID, const CallExpr *E) {
+ unsigned BuiltinID, const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
// See if we can constant fold this builtin. If so, don't emit it at all.
Expr::EvalResult Result;
if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
@@ -255,6 +248,21 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabsl: {
+ Value *Arg1 = EmitScalarExpr(E->getArg(0));
+ Value *Result = EmitFAbs(*this, Arg1);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_fmod:
+ case Builtin::BI__builtin_fmodf:
+ case Builtin::BI__builtin_fmodl: {
+ Value *Arg1 = EmitScalarExpr(E->getArg(0));
+ Value *Arg2 = EmitScalarExpr(E->getArg(1));
+ Value *Result = Builder.CreateFRem(Arg1, Arg2, "fmod");
+ return RValue::get(Result);
+ }
case Builtin::BI__builtin_conj:
case Builtin::BI__builtin_conjf:
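With the hunk above, these builtins stop being lowered as named libm calls: __builtin_fabs* becomes an llvm.fabs intrinsic call and __builtin_fmod* becomes an frem instruction. Minimal usage:

    double fold(double x, double y) {
      return __builtin_fabs(x)      // -> call @llvm.fabs.f64
           + __builtin_fmod(x, y);  // -> frem double
    }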
@@ -388,6 +396,27 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"expval");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_assume_aligned: {
+ Value *PtrValue = EmitScalarExpr(E->getArg(0));
+ Value *OffsetValue =
+ (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
+
+ Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
+ ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
+ unsigned Alignment = (unsigned) AlignmentCI->getZExtValue();
+
+ EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue);
+ return RValue::get(PtrValue);
+ }
+ case Builtin::BI__assume:
+ case Builtin::BI__builtin_assume: {
+ if (E->getArg(0)->HasSideEffects(getContext()))
+ return RValue::get(nullptr);
+
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
+ return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
+ }
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
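Usage of the two builtins added above: __builtin_assume_aligned returns its pointer argument while emitting an alignment assumption, and __builtin_assume feeds llvm.assume (and is dropped when its argument has side effects):

    int sum4(const int *p) {
      const int *q = static_cast<const int *>(__builtin_assume_aligned(p, 16));
      __builtin_assume(q != nullptr);  // becomes a call to @llvm.assume
      return q[0] + q[1] + q[2] + q[3];
    }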
@@ -447,11 +476,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_unreachable: {
- if (SanOpts->Unreachable) {
+ if (SanOpts.has(SanitizerKind::Unreachable)) {
SanitizerScope SanScope(this);
- EmitCheck(Builder.getFalse(), "builtin_unreachable",
- EmitCheckSourceLocation(E->getExprLoc()),
- ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
+ EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
+ SanitizerKind::Unreachable),
+ "builtin_unreachable", EmitCheckSourceLocation(E->getExprLoc()),
+ None);
} else
Builder.CreateUnreachable();
@@ -515,7 +545,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_isinf: {
// isinf(x) --> fabs(x) == infinity
Value *V = EmitScalarExpr(E->getArg(0));
- V = EmitFAbs(*this, V, E->getArg(0)->getType());
+ V = EmitFAbs(*this, V);
V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
@@ -529,7 +559,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
- Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *Abs = EmitFAbs(*this, V);
Value *IsLessThanInf =
Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
APFloat Smallest = APFloat::getSmallestNormalized(
@@ -547,7 +577,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
- Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *Abs = EmitFAbs(*this, V);
Value *IsNotInf =
Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
@@ -586,7 +616,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// if (fabs(V) == infinity) return FP_INFINITY
Builder.SetInsertPoint(NotNan);
- Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
+ Value *VAbs = EmitFAbs(*this, V);
Value *IsInf =
Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
"isinf");
@@ -864,11 +894,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_fetch_and_or:
case Builtin::BI__sync_fetch_and_and:
case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_nand:
case Builtin::BI__sync_add_and_fetch:
case Builtin::BI__sync_sub_and_fetch:
case Builtin::BI__sync_and_and_fetch:
case Builtin::BI__sync_or_and_fetch:
case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_nand_and_fetch:
case Builtin::BI__sync_val_compare_and_swap:
case Builtin::BI__sync_bool_compare_and_swap:
case Builtin::BI__sync_lock_test_and_set:
@@ -905,6 +937,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_fetch_and_xor_8:
case Builtin::BI__sync_fetch_and_xor_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
+ case Builtin::BI__sync_fetch_and_nand_1:
+ case Builtin::BI__sync_fetch_and_nand_2:
+ case Builtin::BI__sync_fetch_and_nand_4:
+ case Builtin::BI__sync_fetch_and_nand_8:
+ case Builtin::BI__sync_fetch_and_nand_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
// Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
@@ -951,6 +989,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_xor_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
llvm::Instruction::Xor);
+ case Builtin::BI__sync_nand_and_fetch_1:
+ case Builtin::BI__sync_nand_and_fetch_2:
+ case Builtin::BI__sync_nand_and_fetch_4:
+ case Builtin::BI__sync_nand_and_fetch_8:
+ case Builtin::BI__sync_nand_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
+ llvm::Instruction::And, true);
case Builtin::BI__sync_val_compare_and_swap_1:
case Builtin::BI__sync_val_compare_and_swap_2:
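The __sync nand builtins gain codegen above; note the Invert flag passed to EmitBinaryAtomicPost, which yields the GCC >= 4.4 semantics of ~(*ptr & val). Minimal usage:

    unsigned fetchThenNand(unsigned *p, unsigned v) {
      return __sync_fetch_and_nand(p, v);  // returns the old value
    }
    unsigned nandThenFetch(unsigned *p, unsigned v) {
      return __sync_nand_and_fetch(p, v);  // returns ~(old & v)
    }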
@@ -1347,11 +1392,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgTy = Arg->getType();
- if (ArgTy->isPPC_FP128Ty())
- break; // FIXME: I'm not sure what the right implementation is here.
int ArgWidth = ArgTy->getPrimitiveSizeInBits();
llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
+ if (ArgTy->isPPC_FP128Ty()) {
+ // The higher-order double comes first, and so we need to truncate the
+ // pair to extract the overall sign. The order of the pair is the same
+ // in both little- and big-Endian modes.
+ ArgWidth >>= 1;
+ ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
+ BCArg = Builder.CreateTrunc(BCArg, ArgIntTy);
+ }
Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
@@ -1518,9 +1569,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__noop:
// __noop always evaluates to an integer literal zero.
return RValue::get(ConstantInt::get(IntTy, 0));
- case Builtin::BI__assume:
- // Until LLVM supports assumptions at the IR level, this becomes nothing.
- return RValue::get(nullptr);
+ case Builtin::BI__builtin_call_with_static_chain: {
+ const CallExpr *Call = cast<CallExpr>(E->getArg(0));
+ const Expr *Chain = E->getArg(1);
+ return EmitCall(Call->getCallee()->getType(),
+ EmitScalarExpr(Call->getCallee()), Call, ReturnValue,
+ Call->getCalleeDecl(), EmitScalarExpr(Chain));
+ }
case Builtin::BI_InterlockedExchange:
case Builtin::BI_InterlockedExchangePointer:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
@@ -1587,6 +1642,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
RMWI->setVolatile(true);
return RValue::get(RMWI);
}
+ case Builtin::BI__readfsdword: {
+ Value *IntToPtr =
+ Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
+ llvm::PointerType::get(CGM.Int32Ty, 257));
+ LoadInst *Load =
+ Builder.CreateAlignedLoad(IntToPtr, /*Align=*/4, /*isVolatile=*/true);
+ return RValue::get(Load);
+ }
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
@@ -1690,8 +1753,6 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
return EmitARMBuiltinExpr(BuiltinID, E);
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- case llvm::Triple::arm64:
- case llvm::Triple::arm64_be:
return EmitAArch64BuiltinExpr(BuiltinID, E);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
@@ -1701,6 +1762,7 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
case llvm::Triple::ppc64le:
return EmitPPCBuiltinExpr(BuiltinID, E);
case llvm::Triple::r600:
+ case llvm::Triple::amdgcn:
return EmitR600BuiltinExpr(BuiltinID, E);
default:
return nullptr;
@@ -2005,8 +2067,12 @@ static NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4q_v, arm_neon_vld4, 0),
NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
+ NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
+ NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
@@ -2042,6 +2108,8 @@ static NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
+ NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
@@ -2051,8 +2119,22 @@ static NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
+ NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
+ NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
+ NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
+ NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
+ NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
+ NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
+ NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
+ NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
+ NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
+ NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
+ NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
+ NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
@@ -2173,6 +2255,8 @@ static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl,UnsignedAlts),
NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
+ NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
@@ -2184,6 +2268,8 @@ static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
+ NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
@@ -2828,6 +2914,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vqshlq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
1, false);
+ case NEON::BI__builtin_neon_vqshlu_n_v:
+ case NEON::BI__builtin_neon_vqshluq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
+ 1, false);
case NEON::BI__builtin_neon_vrecpe_v:
case NEON::BI__builtin_neon_vrecpeq_v:
case NEON::BI__builtin_neon_vrsqrte_v:
@@ -2835,6 +2925,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
+ case NEON::BI__builtin_neon_vrshr_n_v:
+ case NEON::BI__builtin_neon_vrshrq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
+ 1, true);
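
The two cases added above move vqshlu_n and vrshr_n into the shared ARM/AArch64 path; a short <arm_neon.h> usage sketch (the constant shift amounts are arbitrary):

    #include <arm_neon.h>

    uint32x4_t saturating_unsigned_shift(int32x4_t v) {
      return vqshluq_n_s32(v, 3);   // signed-to-unsigned saturating left shift
    }

    int32x4_t rounding_shift_right(int32x4_t v) {
      return vrshrq_n_s32(v, 4);    // rounding arithmetic shift right
    }
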
case NEON::BI__builtin_neon_vshl_n_v:
case NEON::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
@@ -3039,39 +3133,76 @@ static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
return CGF.EmitNeonCall(TblF, TblOps, Name);
}
-Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- unsigned HintID = static_cast<unsigned>(-1);
+Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
switch (BuiltinID) {
- default: break;
+ default:
+ return nullptr;
case ARM::BI__builtin_arm_nop:
- HintID = 0;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 0));
case ARM::BI__builtin_arm_yield:
case ARM::BI__yield:
- HintID = 1;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 1));
case ARM::BI__builtin_arm_wfe:
case ARM::BI__wfe:
- HintID = 2;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 2));
case ARM::BI__builtin_arm_wfi:
case ARM::BI__wfi:
- HintID = 3;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 3));
case ARM::BI__builtin_arm_sev:
case ARM::BI__sev:
- HintID = 4;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 4));
case ARM::BI__builtin_arm_sevl:
case ARM::BI__sevl:
- HintID = 5;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 5));
}
+}
- if (HintID != static_cast<unsigned>(-1)) {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_hint);
- return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
+Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ if (auto Hint = GetValueForARMHint(BuiltinID))
+ return Hint;
+
+ if (BuiltinID == ARM::BI__emit) {
+ bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
+
+ APSInt Value;
+ if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext()))
+ llvm_unreachable("Sema will ensure that the parameter is constant");
+
+ uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
+
+ llvm::InlineAsm *Emit =
+ IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
+ /*SideEffects=*/true)
+ : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
+ /*SideEffects=*/true);
+
+ return Builder.CreateCall(Emit);
+ }
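
A hedged usage sketch for the __emit handling above: the builtin splices a raw instruction encoding into the stream via inline assembly, 16 bits wide in Thumb mode and 32 bits otherwise. The encoding below is the Thumb NOP and is only illustrative:

    void emit_nop() {
      __emit(0xBF00);   // Thumb NOP encoding; becomes an ".inst.n 0xBF00" directive
    }
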
+
+ if (BuiltinID == ARM::BI__builtin_arm_dbg) {
+ Value *Option = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *RW = EmitScalarExpr(E->getArg(1));
+ Value *IsData = EmitScalarExpr(E->getArg(2));
+
+ // Locality is not supported on the ARM target.
+ Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
+
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return Builder.CreateCall4(F, Address, RW, Locality, IsData);
}
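
A usage sketch for the 32-bit ARM __builtin_arm_prefetch lowering above (arguments as the code reads them: address, read/write, data/instruction; the locality operand is pinned to 3):

    void warm(const void *p) {
      // Prefetch for reading, as data: lowers to llvm.prefetch(p, 0, 3, 1),
      // typically a PLD on ARM subtargets that support it.
      __builtin_arm_prefetch(p, /*rw=*/0, /*isData=*/1);
    }
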
if (BuiltinID == ARM::BI__builtin_arm_rbit) {
@@ -3157,7 +3288,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
? Intrinsic::arm_stlexd
: Intrinsic::arm_strexd);
- llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
+ llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, nullptr);
Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
Value *Val = EmitScalarExpr(E->getArg(0));
@@ -3393,7 +3524,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Many NEON builtins have identical semantics and uses in ARM and
// AArch64. Emit these in a single function.
- ArrayRef<NeonIntrinsicInfo> IntrinsicMap(ARMSIMDIntrinsicMap);
+ auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
if (Builtin)
@@ -3500,10 +3631,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vqrshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
Ops, "vqrshrun_n", 1, true);
- case NEON::BI__builtin_neon_vqshlu_n_v:
- case NEON::BI__builtin_neon_vqshluq_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
- Ops, "vqshlu", 1, false);
case NEON::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
@@ -3518,10 +3645,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vrshrn_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
Ops, "vrshrn_n", 1, true);
- case NEON::BI__builtin_neon_vrshr_n_v:
- case NEON::BI__builtin_neon_vrshrq_n_v:
- Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
case NEON::BI__builtin_neon_vrsra_n_v:
case NEON::BI__builtin_neon_vrsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -3836,6 +3959,29 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
}
+ if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *RW = EmitScalarExpr(E->getArg(1));
+ Value *CacheLevel = EmitScalarExpr(E->getArg(2));
+ Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
+ Value *IsData = EmitScalarExpr(E->getArg(4));
+
+ Value *Locality = nullptr;
+ if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
+ // Temporal fetch: convert the cache level to a locality value.
+ Locality = llvm::ConstantInt::get(Int32Ty,
+ -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
+ } else {
+ // Streaming fetch.
+ Locality = llvm::ConstantInt::get(Int32Ty, 0);
+ }
+
+ // FIXME: We need an AArch64-specific LLVM intrinsic if we want to specify
+ // PLDL3STRM or PLDL2STRM.
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return Builder.CreateCall4(F, Address, RW, Locality, IsData);
+ }
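
The AArch64 variant above takes five arguments (address, read/write, cache level, retention policy, data/instruction) and folds the middle two into llvm.prefetch's single locality operand: a temporal ("keep") fetch maps cache level N to locality 3 - N, and any streaming fetch collapses to locality 0. A hedged usage sketch:

    void warm_l1(const void *p) {
      // L1, temporal, data read: cache level 0 -> locality 3 (roughly PLDL1KEEP).
      __builtin_arm_prefetch(p, /*rw=*/0, /*cacheLevel=*/0,
                             /*retentionPolicy=*/0, /*isData=*/1);
    }

    void warm_l3_stream(const void *p) {
      // Streaming fetch: locality becomes 0 regardless of cache level
      // (hence the FIXME above about PLDL2STRM/PLDL3STRM).
      __builtin_arm_prefetch(p, /*rw=*/0, /*cacheLevel=*/2,
                             /*retentionPolicy=*/1, /*isData=*/1);
    }
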
+
if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
assert((getContext().getTypeSize(E->getType()) == 32) &&
"rbit of unusual size!");
@@ -3913,7 +4059,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
? Intrinsic::aarch64_stlxp
: Intrinsic::aarch64_stxp);
- llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, NULL);
+ llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, nullptr);
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
@@ -3994,7 +4140,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
- ArrayRef<NeonIntrinsicInfo> SISDMap(AArch64SISDIntrinsicMap);
+ auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
@@ -4675,38 +4821,19 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f64Type),
Ops, "vrecps");
}
- case NEON::BI__builtin_neon_vrshr_n_v:
- case NEON::BI__builtin_neon_vrshrq_n_v:
- // FIXME: this can be shared with 32-bit ARM, but not AArch64 at the
- // moment. After the final merge it should be added to
- // EmitCommonNeonBuiltinExpr.
- Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
- case NEON::BI__builtin_neon_vqshlu_n_v:
- case NEON::BI__builtin_neon_vqshluq_n_v:
- // FIXME: AArch64 and ARM use different intrinsics for this, but are
- // essentially compatible. It should be in EmitCommonNeonBuiltinExpr after
- // the final merge.
- Int = Intrinsic::aarch64_neon_sqshlu;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", 1, false);
case NEON::BI__builtin_neon_vqshrun_n_v:
- // FIXME: as above
Int = Intrinsic::aarch64_neon_sqshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
case NEON::BI__builtin_neon_vqrshrun_n_v:
- // FIXME: and again.
Int = Intrinsic::aarch64_neon_sqrshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
case NEON::BI__builtin_neon_vqshrn_n_v:
- // FIXME: guess
Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
case NEON::BI__builtin_neon_vrshrn_n_v:
- // FIXME: there might be a pattern here.
Int = Intrinsic::aarch64_neon_rshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
case NEON::BI__builtin_neon_vqrshrn_n_v:
- // FIXME: another one
Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
case NEON::BI__builtin_neon_vrnda_v:
@@ -5435,8 +5562,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateZExt(Ops[3],
llvm::IntegerType::get(getLLVMContext(), 64));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value*>(Ops).slice(1), "vld2_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -5452,8 +5578,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateZExt(Ops[4],
llvm::IntegerType::get(getLLVMContext(), 64));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value*>(Ops).slice(1), "vld3_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -5470,8 +5595,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateZExt(Ops[5],
llvm::IntegerType::get(getLLVMContext(), 64));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value*>(Ops).slice(1), "vld4_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -5757,7 +5881,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
- return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
@@ -5787,7 +5911,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
- return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
@@ -5825,7 +5949,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
- return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
@@ -5839,8 +5963,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_movntdq256:
case X86::BI__builtin_ia32_movnti:
case X86::BI__builtin_ia32_movnti64: {
- llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
- Builder.getInt32(1));
+ llvm::MDNode *Node = llvm::MDNode::get(
+ getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
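
The two replaced lines reflect the LLVM 3.6 metadata/value split: an i32 constant can no longer be handed to MDNode::get directly and must be wrapped in ConstantAsMetadata. A standalone sketch of the same pattern, assuming an existing IRBuilder and StoreInst:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Metadata.h"

    // Mark a store as non-temporal, mirroring the movnti lowering above.
    static void markNonTemporal(llvm::IRBuilder<> &Builder, llvm::StoreInst *SI) {
      llvm::MDNode *Node = llvm::MDNode::get(
          Builder.getContext(),
          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
      SI->setMetadata("nontemporal", Node);
    }
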
// Convert the type of the pointer to a pointer to the stored type.
Value *BC = Builder.CreateBitCast(Ops[0],
@@ -5863,8 +5987,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
- const char *name = nullptr;
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ const char *name;
+ Intrinsic::ID ID;
switch(BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_pswapdsf:
@@ -5918,6 +6042,154 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *F = CGM.getIntrinsic(Intrinsic::x86_avx2_vbroadcasti128);
return Builder.CreateCall(F, Builder.CreateBitCast(VecTmp, Int8PtrTy));
}
+ // SSE comparison intrinsics
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpordps:
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpordss:
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpordpd:
+ case X86::BI__builtin_ia32_cmpeqsd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ case X86::BI__builtin_ia32_cmplesd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ // These exist so that the builtin that takes an immediate can be bounds
+ // checked by clang to avoid passing bad immediates to the backend. Since
+ // AVX has a larger immediate than SSE, we would need separate builtins to
+ // do the different bounds checking. Rather than create a clang-specific
+ // SSE-only builtin, this implements eight separate builtins to match the
+ // gcc implementation.
+
+ // Choose the immediate.
+ unsigned Imm;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpeqsd:
+ Imm = 0;
+ break;
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ Imm = 1;
+ break;
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmplesd:
+ Imm = 2;
+ break;
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ Imm = 3;
+ break;
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ Imm = 4;
+ break;
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ Imm = 5;
+ break;
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ Imm = 6;
+ break;
+ case X86::BI__builtin_ia32_cmpordps:
+ case X86::BI__builtin_ia32_cmpordss:
+ case X86::BI__builtin_ia32_cmpordpd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ Imm = 7;
+ break;
+ }
+
+ // Choose the intrinsic ID.
+ const char *name;
+ Intrinsic::ID ID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpordps:
+ name = "cmpps";
+ ID = Intrinsic::x86_sse_cmp_ps;
+ break;
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpordss:
+ name = "cmpss";
+ ID = Intrinsic::x86_sse_cmp_ss;
+ break;
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpordpd:
+ name = "cmppd";
+ ID = Intrinsic::x86_sse2_cmp_pd;
+ break;
+ case X86::BI__builtin_ia32_cmpeqsd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ case X86::BI__builtin_ia32_cmplesd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ name = "cmpsd";
+ ID = Intrinsic::x86_sse2_cmp_sd;
+ break;
+ }
+
+ Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, name);
}
}
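
For orientation, the block above maps the eight gcc-compatible comparison builtins onto the immediate form (eq -> 0, lt -> 1, le -> 2, unord -> 3, neq -> 4, nlt -> 5, nle -> 6, ord -> 7) of llvm.x86.sse.cmp.ps and friends. A small example using the gcc-compatible spelling listed in the switch:

    #include <xmmintrin.h>

    __m128 less_than(__m128 a, __m128 b) {
      // clang's own headers reach the same lowering through the immediate
      // builtin; either way the result is llvm.x86.sse.cmp.ps(a, b, 1).
      return __builtin_ia32_cmpltps(a, b);
    }
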
@@ -5942,6 +6214,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_lvewx:
case PPC::BI__builtin_altivec_lvsl:
case PPC::BI__builtin_altivec_lvsr:
+ case PPC::BI__builtin_vsx_lxvd2x:
+ case PPC::BI__builtin_vsx_lxvw4x:
{
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
@@ -5971,6 +6245,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_lvsr:
ID = Intrinsic::ppc_altivec_lvsr;
break;
+ case PPC::BI__builtin_vsx_lxvd2x:
+ ID = Intrinsic::ppc_vsx_lxvd2x;
+ break;
+ case PPC::BI__builtin_vsx_lxvw4x:
+ ID = Intrinsic::ppc_vsx_lxvw4x;
+ break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
@@ -5982,6 +6262,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvebx:
case PPC::BI__builtin_altivec_stvehx:
case PPC::BI__builtin_altivec_stvewx:
+ case PPC::BI__builtin_vsx_stxvd2x:
+ case PPC::BI__builtin_vsx_stxvw4x:
{
Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
@@ -6004,6 +6286,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvewx:
ID = Intrinsic::ppc_altivec_stvewx;
break;
+ case PPC::BI__builtin_vsx_stxvd2x:
+ ID = Intrinsic::ppc_vsx_stxvd2x;
+ break;
+ case PPC::BI__builtin_vsx_stxvw4x:
+ ID = Intrinsic::ppc_vsx_stxvw4x;
+ break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
@@ -6033,6 +6321,17 @@ static Value *emitTernaryFPBuiltin(CodeGenFunction &CGF,
return CGF.Builder.CreateCall3(F, Src0, Src1, Src2);
}
+// Emit an intrinsic that takes one float or double operand and one integer operand.
+static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
+ const CallExpr *E,
+ unsigned IntrinsicID) {
+ llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+
+ Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+ return CGF.Builder.CreateCall2(F, Src0, Src1);
+}
+
Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
@@ -6065,18 +6364,23 @@ Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID,
return Result;
}
case R600::BI__builtin_amdgpu_div_fmas:
- case R600::BI__builtin_amdgpu_div_fmasf:
- return emitTernaryFPBuiltin(*this, E, Intrinsic::AMDGPU_div_fmas);
+ case R600::BI__builtin_amdgpu_div_fmasf: {
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+ llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
+ llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
+
+ llvm::Value *F = CGM.getIntrinsic(Intrinsic::AMDGPU_div_fmas,
+ Src0->getType());
+ llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
+ return Builder.CreateCall4(F, Src0, Src1, Src2, Src3ToBool);
+ }
case R600::BI__builtin_amdgpu_div_fixup:
case R600::BI__builtin_amdgpu_div_fixupf:
return emitTernaryFPBuiltin(*this, E, Intrinsic::AMDGPU_div_fixup);
case R600::BI__builtin_amdgpu_trig_preop:
- case R600::BI__builtin_amdgpu_trig_preopf: {
- Value *Src0 = EmitScalarExpr(E->getArg(0));
- Value *Src1 = EmitScalarExpr(E->getArg(1));
- Value *F = CGM.getIntrinsic(Intrinsic::AMDGPU_trig_preop, Src0->getType());
- return Builder.CreateCall2(F, Src0, Src1);
- }
+ case R600::BI__builtin_amdgpu_trig_preopf:
+ return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_trig_preop);
case R600::BI__builtin_amdgpu_rcp:
case R600::BI__builtin_amdgpu_rcpf:
return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rcp);
@@ -6086,6 +6390,12 @@ Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID,
case R600::BI__builtin_amdgpu_rsq_clamped:
case R600::BI__builtin_amdgpu_rsq_clampedf:
return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq_clamped);
+ case R600::BI__builtin_amdgpu_ldexp:
+ case R600::BI__builtin_amdgpu_ldexpf:
+ return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_ldexp);
+ case R600::BI__builtin_amdgpu_class:
+ case R600::BI__builtin_amdgpu_classf:
+ return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_class);
default:
return nullptr;
}
diff --git a/lib/CodeGen/CGCUDARuntime.cpp b/lib/CodeGen/CGCUDARuntime.cpp
index 29e0a91a63fe..014a5dbd46d6 100644
--- a/lib/CodeGen/CGCUDARuntime.cpp
+++ b/lib/CodeGen/CGCUDARuntime.cpp
@@ -45,8 +45,7 @@ RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
}
llvm::Value *Callee = CGF.EmitScalarExpr(E->getCallee());
- CGF.EmitCall(E->getCallee()->getType(), Callee, E->getLocStart(),
- ReturnValue, E->arg_begin(), E->arg_end(), TargetDecl);
+ CGF.EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue, TargetDecl);
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock);
diff --git a/lib/CodeGen/CGCUDARuntime.h b/lib/CodeGen/CGCUDARuntime.h
index a99a67ae1ae7..8c162fb05ab9 100644
--- a/lib/CodeGen/CGCUDARuntime.h
+++ b/lib/CodeGen/CGCUDARuntime.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CUDARUNTIME_H
-#define CLANG_CODEGEN_CUDARUNTIME_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
namespace clang {
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 545c5ef9f827..9f0e67e42176 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -44,12 +44,13 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (!D->hasTrivialBody())
return true;
- // For exported destructors, we need a full definition.
- if (D->hasAttr<DLLExportAttr>())
- return true;
-
const CXXRecordDecl *Class = D->getParent();
+ // We are going to instrument this destructor, so give up even if it is
+ // currently empty.
+ if (Class->mayInsertExtraPadding())
+ return true;
+
// If we need to manipulate a VTT parameter, give up.
if (Class->getNumVBases()) {
// Extra Credit: passing extra parameters is perfectly safe
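
For context, the surrounding function decides whether a derived class's base destructor can be emitted as an alias of its base class's destructor; a hypothetical pair of classes that satisfies the check (names are illustrative):

    struct Base { ~Base(); };          // exactly one non-virtual base with a
                                       // non-trivial destructor
    struct Derived : Base {            // no virtual bases, no fields with
      int plain_field;                 // non-trivial destructors, and a
    };                                 // trivial (empty) destructor body
    // ~Derived() can then be emitted as an alias of ~Base() instead of a
    // separate function body.
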
@@ -123,6 +124,11 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
if (!llvm::GlobalAlias::isValidLinkage(Linkage))
return true;
+ // Don't create a weak alias for a dllexport'd symbol.
+ if (AliasDecl.getDecl()->hasAttr<DLLExportAttr>() &&
+ llvm::GlobalValue::isWeakForLinker(Linkage))
+ return true;
+
llvm::GlobalValue::LinkageTypes TargetLinkage =
getFunctionLinkage(TargetDecl);
@@ -161,9 +167,9 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
}
if (!InEveryTU) {
- /// If we don't have a definition for the destructor yet, don't
- /// emit. We can't emit aliases to declarations; that's just not
- /// how aliases work.
+ // If we don't have a definition for the destructor yet, don't
+ // emit. We can't emit aliases to declarations; that's just not
+ // how aliases work.
if (Ref->isDeclaration())
return true;
}
@@ -191,114 +197,55 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
}
// Finally, set up the alias with its proper name and attributes.
- SetCommonAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
+ setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
return false;
}
-void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *ctor,
- CXXCtorType ctorType) {
- if (!getTarget().getCXXABI().hasConstructorVariants()) {
- // If there are no constructor variants, always emit the complete destructor.
- ctorType = Ctor_Complete;
- } else if (!ctor->getParent()->getNumVBases() &&
- (ctorType == Ctor_Complete || ctorType == Ctor_Base)) {
- // The complete constructor is equivalent to the base constructor
- // for classes with no virtual bases. Try to emit it as an alias.
- bool ProducedAlias =
- !TryEmitDefinitionAsAlias(GlobalDecl(ctor, Ctor_Complete),
- GlobalDecl(ctor, Ctor_Base), true);
- if (ctorType == Ctor_Complete && ProducedAlias)
- return;
- }
+llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
+ StructorType Type) {
+ const CGFunctionInfo &FnInfo =
+ getTypes().arrangeCXXStructorDeclaration(MD, Type);
+ auto *Fn = cast<llvm::Function>(
+ getAddrOfCXXStructor(MD, Type, &FnInfo, nullptr, true));
- const CGFunctionInfo &fnInfo =
- getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
-
- auto *fn = cast<llvm::Function>(
- GetAddrOfCXXConstructor(ctor, ctorType, &fnInfo, true));
- setFunctionLinkage(GlobalDecl(ctor, ctorType), fn);
-
- CodeGenFunction(*this).GenerateCode(GlobalDecl(ctor, ctorType), fn, fnInfo);
-
- setFunctionDefinitionAttributes(ctor, fn);
- SetLLVMFunctionAttributesForDefinition(ctor, fn);
-}
-
-llvm::GlobalValue *
-CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
- CXXCtorType ctorType,
- const CGFunctionInfo *fnInfo,
- bool DontDefer) {
- GlobalDecl GD(ctor, ctorType);
-
- StringRef name = getMangledName(GD);
- if (llvm::GlobalValue *existing = GetGlobalValue(name))
- return existing;
-
- if (!fnInfo)
- fnInfo = &getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
+ GlobalDecl GD;
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ GD = GlobalDecl(DD, toCXXDtorType(Type));
+ } else {
+ const auto *CD = cast<CXXConstructorDecl>(MD);
+ GD = GlobalDecl(CD, toCXXCtorType(Type));
+ }
- llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
- return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
- /*ForVTable=*/false,
- DontDefer));
+ setFunctionLinkage(GD, Fn);
+ CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo);
+ setFunctionDefinitionAttributes(MD, Fn);
+ SetLLVMFunctionAttributesForDefinition(MD, Fn);
+ return Fn;
}
-void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *dtor,
- CXXDtorType dtorType) {
- // The complete destructor is equivalent to the base destructor for
- // classes with no virtual bases, so try to emit it as an alias.
- if (!dtor->getParent()->getNumVBases() &&
- (dtorType == Dtor_Complete || dtorType == Dtor_Base)) {
- bool ProducedAlias =
- !TryEmitDefinitionAsAlias(GlobalDecl(dtor, Dtor_Complete),
- GlobalDecl(dtor, Dtor_Base), true);
- if (ProducedAlias) {
- if (dtorType == Dtor_Complete)
- return;
- if (dtor->isVirtual())
- getVTables().EmitThunks(GlobalDecl(dtor, Dtor_Complete));
- }
+llvm::GlobalValue *CodeGenModule::getAddrOfCXXStructor(
+ const CXXMethodDecl *MD, StructorType Type, const CGFunctionInfo *FnInfo,
+ llvm::FunctionType *FnType, bool DontDefer) {
+ GlobalDecl GD;
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ GD = GlobalDecl(CD, toCXXCtorType(Type));
+ } else {
+ auto *DD = dyn_cast<CXXDestructorDecl>(MD);
+ GD = GlobalDecl(DD, toCXXDtorType(Type));
}
- // The base destructor is equivalent to the base destructor of its
- // base class if there is exactly one non-virtual base class with a
- // non-trivial destructor, there are no fields with a non-trivial
- // destructor, and the body of the destructor is trivial.
- if (dtorType == Dtor_Base && !TryEmitBaseDestructorAsAlias(dtor))
- return;
+ StringRef Name = getMangledName(GD);
+ if (llvm::GlobalValue *Existing = GetGlobalValue(Name))
+ return Existing;
- const CGFunctionInfo &fnInfo =
- getTypes().arrangeCXXDestructor(dtor, dtorType);
-
- auto *fn = cast<llvm::Function>(
- GetAddrOfCXXDestructor(dtor, dtorType, &fnInfo, nullptr, true));
- setFunctionLinkage(GlobalDecl(dtor, dtorType), fn);
-
- CodeGenFunction(*this).GenerateCode(GlobalDecl(dtor, dtorType), fn, fnInfo);
-
- setFunctionDefinitionAttributes(dtor, fn);
- SetLLVMFunctionAttributesForDefinition(dtor, fn);
-}
-
-llvm::GlobalValue *
-CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
- CXXDtorType dtorType,
- const CGFunctionInfo *fnInfo,
- llvm::FunctionType *fnType,
- bool DontDefer) {
- GlobalDecl GD(dtor, dtorType);
-
- StringRef name = getMangledName(GD);
- if (llvm::GlobalValue *existing = GetGlobalValue(name))
- return existing;
-
- if (!fnType) {
- if (!fnInfo) fnInfo = &getTypes().arrangeCXXDestructor(dtor, dtorType);
- fnType = getTypes().GetFunctionType(*fnInfo);
+ if (!FnType) {
+ if (!FnInfo)
+ FnInfo = &getTypes().arrangeCXXStructorDeclaration(MD, Type);
+ FnType = getTypes().GetFunctionType(*FnInfo);
}
- return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
+
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FnType, GD,
/*ForVTable=*/false,
DontDefer));
}
@@ -360,8 +307,8 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
// -O does that. But need to support -O0 as well.
if (MD->isVirtual() && Type != Dtor_Base) {
// Compute the function type we're calling.
- const CGFunctionInfo &FInfo =
- CGM.getTypes().arrangeCXXDestructor(DD, Dtor_Complete);
+ const CGFunctionInfo &FInfo = CGM.getTypes().arrangeCXXStructorDeclaration(
+ DD, StructorType::Complete);
llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
return ::BuildAppleKextVirtualCall(*this, GlobalDecl(DD, Type), Ty, RD);
}
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index 55ddd666c490..d31331de6868 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -246,17 +246,6 @@ llvm::Value *CGCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
return llvm::ConstantInt::get(CGF.SizeTy, 0);
}
-void CGCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
- const VarDecl &D,
- llvm::Constant *dtor,
- llvm::Constant *addr) {
- if (D.getTLSKind())
- CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
-
- // The default behavior is to use atexit.
- CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
-}
-
/// Returns the adjustment, in bytes, required for the given
/// member-pointer operation. Returns null if no adjustment is
/// required.
@@ -310,18 +299,6 @@ CGCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
return nullptr;
}
-void CGCXXABI::EmitThreadLocalInitFuncs(
- ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
- llvm::Function *InitFunc) {
-}
-
-LValue CGCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
- const VarDecl *VD,
- QualType LValType) {
- ErrorUnsupportedABI(CGF, "odr-use of thread_local global");
- return LValue();
-}
-
bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) {
return false;
}
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 91e49707bae6..cc5c1b2e0ae6 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CXXABI_H
-#define CLANG_CODEGEN_CXXABI_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCXXABI_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCXXABI_H
#include "CodeGenFunction.h"
#include "clang/Basic/LLVM.h"
@@ -93,6 +93,8 @@ public:
/// when called virtually, and code generation does not support the case.
virtual bool HasThisReturn(GlobalDecl GD) const { return false; }
+ virtual bool hasMostDerivedReturn(GlobalDecl GD) const { return false; }
+
/// If the C++ ABI requires the given type be returned in a particular way,
/// this method sets RetAI and returns true.
virtual bool classifyReturnType(CGFunctionInfo &FI) const = 0;
@@ -156,6 +158,15 @@ public:
/// (in the C++ sense) with an LLVM zeroinitializer.
virtual bool isZeroInitializable(const MemberPointerType *MPT);
+ /// Return whether or not a member pointer type is convertible to an IR type.
+ virtual bool isMemberPointerConvertible(const MemberPointerType *MPT) const {
+ return true;
+ }
+
+ virtual bool isTypeInfoCalculable(QualType Ty) const {
+ return !Ty->isIncompleteType();
+ }
+
/// Create a null member pointer of the given type.
virtual llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
@@ -198,14 +209,11 @@ protected:
CharUnits getMemberPointerPathAdjustment(const APValue &MP);
public:
- /// Adjust the given non-null pointer to an object of polymorphic
- /// type to point to the complete object.
- ///
- /// The IR type of the result should be a pointer but is otherwise
- /// irrelevant.
- virtual llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
- llvm::Value *ptr,
- QualType type) = 0;
+ virtual void emitVirtualObjectDelete(CodeGenFunction &CGF,
+ const CXXDeleteExpr *DE,
+ llvm::Value *Ptr, QualType ElementType,
+ const CXXDestructorDecl *Dtor) = 0;
+ virtual void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) = 0;
virtual llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) = 0;
@@ -236,20 +244,6 @@ public:
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) = 0;
- /// Build the signature of the given constructor variant by adding
- /// any required parameters. For convenience, ArgTys has been initialized
- /// with the type of 'this' and ResTy has been initialized with the type of
- /// 'this' if HasThisReturn(GlobalDecl(Ctor, T)) is true or 'void' otherwise
- /// (although both may be changed by the ABI).
- ///
- /// If there are ever any ABIs where the implicit parameters are
- /// intermixed with the formal parameters, we can address those
- /// then.
- virtual void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
- CXXCtorType T,
- CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) = 0;
-
virtual llvm::BasicBlock *EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
const CXXRecordDecl *RD);
@@ -262,15 +256,11 @@ public:
/// Emit constructor variants required by this ABI.
virtual void EmitCXXConstructors(const CXXConstructorDecl *D) = 0;
- /// Build the signature of the given destructor variant by adding
- /// any required parameters. For convenience, ArgTys has been initialized
- /// with the type of 'this' and ResTy has been initialized with the type of
- /// 'this' if HasThisReturn(GlobalDecl(Dtor, T)) is true or 'void' otherwise
- /// (although both may be changed by the ABI).
- virtual void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
- CXXDtorType T,
- CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) = 0;
+ /// Build the signature of the given constructor or destructor variant by
+ /// adding any required parameters. For convenience, ArgTys has been
+ /// initialized with the type of 'this'.
+ virtual void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ SmallVectorImpl<CanQualType> &ArgTys) = 0;
/// Returns true if the given destructor type should be emitted as a linkonce
/// delegating thunk, regardless of whether the dtor is defined in this TU or
@@ -368,11 +358,10 @@ public:
llvm::Type *Ty) = 0;
/// Emit the ABI-specific virtual destructor call.
- virtual void EmitVirtualDestructorCall(CodeGenFunction &CGF,
- const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType,
- SourceLocation CallLoc,
- llvm::Value *This) = 0;
+ virtual llvm::Value *
+ EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor,
+ CXXDtorType DtorType, llvm::Value *This,
+ const CXXMemberCallExpr *CE) = 0;
virtual void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF,
GlobalDecl GD,
@@ -397,6 +386,9 @@ public:
virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType);
+ virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
+ FunctionArgList &Args) const = 0;
+
/// Gets the pure virtual member call function.
virtual StringRef GetPureVirtualCallName() = 0;
@@ -490,30 +482,44 @@ public:
/// Emit code to force the execution of a destructor during global
/// teardown. The default implementation of this uses atexit.
///
- /// \param dtor - a function taking a single pointer argument
- /// \param addr - a pointer to pass to the destructor function.
+ /// \param Dtor - a function taking a single pointer argument
+ /// \param Addr - a pointer to pass to the destructor function.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *dtor, llvm::Constant *addr);
+ llvm::Constant *Dtor,
+ llvm::Constant *Addr) = 0;
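
The atexit-based default that this comment documents corresponds, at the source level, roughly to the following; the stub name is invented for illustration, and the Itanium ABI normally goes through __cxa_atexit instead of plain atexit:

    struct Logger { ~Logger(); };
    static Logger g_logger;
    // Conceptually, initialization registers a small stub for global teardown:
    //   static void g_logger_dtor_stub() { g_logger.~Logger(); }
    //   atexit(&g_logger_dtor_stub);
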
/*************************** thread_local initialization ********************/
/// Emits ABI-required functions necessary to initialize thread_local
/// variables in this translation unit.
///
- /// \param Decls The thread_local declarations in this translation unit.
- /// \param InitFunc If this translation unit contains any non-constant
- /// initialization or non-trivial destruction for thread_local
- /// variables, a function to perform the initialization. Otherwise, 0.
+ /// \param CXXThreadLocals - The thread_local declarations in this translation
+ /// unit.
+ /// \param CXXThreadLocalInits - If this translation unit contains any
+ /// non-constant initialization or non-trivial destruction for
+ /// thread_local variables, a list of functions to perform the
+ /// initialization.
virtual void EmitThreadLocalInitFuncs(
- ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
- llvm::Function *InitFunc);
+ CodeGenModule &CGM,
+ ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
+ CXXThreadLocals,
+ ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) = 0;
+
+ // Determine if references to thread_local global variables can be made
+ // directly or require access through a thread wrapper function.
+ virtual bool usesThreadWrapperFunction() const = 0;
/// Emit a reference to a non-local thread_local variable (including
/// triggering the initialization of all thread_local variables in its
/// translation unit).
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
const VarDecl *VD,
- QualType LValType);
+ QualType LValType) = 0;
+
+ /// Emit a single constructor/destructor with the given type from a C++
+ /// constructor Decl.
+ virtual void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) = 0;
};
// Create an instance of a C++ ABI class:
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 17c3354f93e9..6403fa99aa7b 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -47,7 +47,10 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
- // TODO: add support for CC_X86Pascal to llvm
+ // TODO: Add support for __pascal to LLVM.
+ case CC_X86Pascal: return llvm::CallingConv::C;
+ // TODO: Add support for __vectorcall to LLVM.
+ case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
}
}
@@ -80,42 +83,25 @@ CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
// When translating an unprototyped function type, always use a
// variadic type.
return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
- false, None, FTNP->getExtInfo(),
- RequiredArgs(0));
+ /*instanceMethod=*/false,
+ /*chainCall=*/false, None,
+ FTNP->getExtInfo(), RequiredArgs(0));
}
/// Arrange the LLVM function layout for a value of the given function
-/// type, on top of any implicit parameters already stored. Use the
-/// given ExtInfo instead of the ExtInfo from the function type.
-static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
- bool IsInstanceMethod,
- SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP,
- FunctionType::ExtInfo extInfo) {
+/// type, on top of any implicit parameters already stored.
+static const CGFunctionInfo &
+arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
+ SmallVectorImpl<CanQualType> &prefix,
+ CanQual<FunctionProtoType> FTP) {
RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
prefix.push_back(FTP->getParamType(i));
CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
- return CGT.arrangeLLVMFunctionInfo(resultType, IsInstanceMethod, prefix,
- extInfo, required);
-}
-
-/// Arrange the argument and result information for a free function (i.e.
-/// not a C++ or ObjC instance method) of the given type.
-static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
- SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP) {
- return arrangeLLVMFunctionInfo(CGT, false, prefix, FTP, FTP->getExtInfo());
-}
-
-/// Arrange the argument and result information for a free function (i.e.
-/// not a C++ or ObjC instance method) of the given type.
-static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
- SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP) {
- FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- return arrangeLLVMFunctionInfo(CGT, true, prefix, FTP, extInfo);
+ return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
+ /*chainCall=*/false, prefix,
+ FTP->getExtInfo(), required);
}
/// Arrange the argument and result information for a value of the
@@ -123,7 +109,8 @@ static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
SmallVector<CanQualType, 16> argTypes;
- return ::arrangeFreeFunctionType(*this, argTypes, FTP);
+ return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
+ FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
@@ -137,6 +124,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
if (D->hasAttr<ThisCallAttr>())
return CC_X86ThisCall;
+ if (D->hasAttr<VectorCallAttr>())
+ return CC_X86VectorCall;
+
if (D->hasAttr<PascalAttr>())
return CC_X86Pascal;
@@ -158,23 +148,6 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
return CC_C;
}
-static bool isAAPCSVFP(const CGFunctionInfo &FI, const TargetInfo &Target) {
- switch (FI.getEffectiveCallingConvention()) {
- case llvm::CallingConv::C:
- switch (Target.getTriple().getEnvironment()) {
- case llvm::Triple::EABIHF:
- case llvm::Triple::GNUEABIHF:
- return true;
- default:
- return false;
- }
- case llvm::CallingConv::ARM_AAPCS_VFP:
- return true;
- default:
- return false;
- }
-}
-
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
@@ -192,8 +165,9 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
else
argTypes.push_back(Context.VoidPtrTy);
- return ::arrangeCXXMethodType(*this, argTypes,
- FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
+ return ::arrangeLLVMFunctionInfo(
+ *this, true, argTypes,
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
/// Arrange the argument and result information for a declaration or
@@ -216,31 +190,41 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
return arrangeFreeFunctionType(prototype);
}
-/// Arrange the argument and result information for a declaration
-/// or definition to the given constructor variant.
const CGFunctionInfo &
-CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
- CXXCtorType ctorKind) {
+CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
+ StructorType Type) {
+
SmallVector<CanQualType, 16> argTypes;
- argTypes.push_back(GetThisType(Context, D->getParent()));
+ argTypes.push_back(GetThisType(Context, MD->getParent()));
- GlobalDecl GD(D, ctorKind);
- CanQualType resultType =
- TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
+ GlobalDecl GD;
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ GD = GlobalDecl(CD, toCXXCtorType(Type));
+ } else {
+ auto *DD = dyn_cast<CXXDestructorDecl>(MD);
+ GD = GlobalDecl(DD, toCXXDtorType(Type));
+ }
- CanQual<FunctionProtoType> FTP = GetFormalType(D);
+ CanQual<FunctionProtoType> FTP = GetFormalType(MD);
// Add the formal parameters.
for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
argTypes.push_back(FTP->getParamType(i));
- TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
+ TheCXXABI.buildStructorSignature(MD, Type, argTypes);
RequiredArgs required =
- (D->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
+ (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo, required);
+ CanQualType resultType = TheCXXABI.HasThisReturn(GD)
+ ? argTypes.front()
+ : TheCXXABI.hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : Context.VoidTy;
+ return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
+ /*chainCall=*/false, argTypes, extInfo,
+ required);
}
/// Arrange a call to a C++ method, passing the given arguments.
@@ -251,42 +235,22 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
unsigned ExtraArgs) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> ArgTypes;
- for (CallArgList::const_iterator i = args.begin(), e = args.end(); i != e;
- ++i)
- ArgTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ for (const auto &Arg : args)
+ ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
CanQual<FunctionProtoType> FPT = GetFormalType(D);
RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
GlobalDecl GD(D, CtorKind);
- CanQualType ResultType =
- TheCXXABI.HasThisReturn(GD) ? ArgTypes.front() : Context.VoidTy;
+ CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
+ ? ArgTypes.front()
+ : TheCXXABI.hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : Context.VoidTy;
FunctionType::ExtInfo Info = FPT->getExtInfo();
- return arrangeLLVMFunctionInfo(ResultType, true, ArgTypes, Info, Required);
-}
-
-/// Arrange the argument and result information for a declaration,
-/// definition, or call to the given destructor variant. It so
-/// happens that all three cases produce the same information.
-const CGFunctionInfo &
-CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
- CXXDtorType dtorKind) {
- SmallVector<CanQualType, 2> argTypes;
- argTypes.push_back(GetThisType(Context, D->getParent()));
-
- GlobalDecl GD(D, dtorKind);
- CanQualType resultType =
- TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
-
- TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
-
- CanQual<FunctionProtoType> FTP = GetFormalType(D);
- assert(FTP->getNumParams() == 0 && "dtor with formal parameters");
- assert(FTP->isVariadic() == 0 && "dtor with formal parameters");
-
- FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo,
- RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
+ /*chainCall=*/false, ArgTypes, Info,
+ Required);
}
/// Arrange the argument and result information for the declaration or
@@ -305,8 +269,9 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
// non-variadic type.
if (isa<FunctionNoProtoType>(FTy)) {
CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
- return arrangeLLVMFunctionInfo(noProto->getReturnType(), false, None,
- noProto->getExtInfo(), RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(
+ noProto->getReturnType(), /*instanceMethod=*/false,
+ /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
}
assert(isa<FunctionProtoType>(FTy));
@@ -350,8 +315,9 @@ CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
RequiredArgs required =
(MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
- return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), false,
- argTys, einfo, required);
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTys, einfo, required);
}
const CGFunctionInfo &
@@ -360,14 +326,29 @@ CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
- return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
+ return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
- return arrangeCXXDestructor(DD, GD.getDtorType());
+ return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
return arrangeFunctionDeclaration(FD);
}
+/// Arrange a thunk that takes 'this' as the first parameter followed by
+/// varargs. Return a void pointer, regardless of the actual return type.
+/// The body of the thunk will end in a musttail call to a function of the
+/// correct type, and the caller will bitcast the function to the correct
+/// prototype.
+const CGFunctionInfo &
+CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
+ assert(MD->isVirtual() && "only virtual memptrs have thunks");
+ CanQual<FunctionProtoType> FTP = GetFormalType(MD);
+ CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
+ return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
+ /*chainCall=*/false, ArgTys,
+ FTP->getExtInfo(), RequiredArgs(1));
+}
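
A hedged illustration of the source construct this arrangement serves: under the Microsoft C++ ABI, taking the address of a virtual member function yields a pointer to a small thunk with this prototype shape, whose body ends in a musttail call to the real target:

    struct S {
      virtual int f(int);
    };

    int (S::*pmf)(int) = &S::f;          // stores a pointer to the vcall thunk

    int call_through(S *s) {
      return (s->*pmf)(7);               // dispatches through the thunk
    }
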
+
/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
@@ -375,7 +356,8 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
CodeGenModule &CGM,
const CallArgList &args,
const FunctionType *fnType,
- unsigned numExtraRequiredArgs) {
+ unsigned numExtraRequiredArgs,
+ bool chainCall) {
assert(args.size() >= numExtraRequiredArgs);
// In most cases, there are no optional arguments.
@@ -397,8 +379,13 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
required = RequiredArgs(args.size());
}
- return CGT.arrangeFreeFunctionCall(fnType->getReturnType(), args,
- fnType->getExtInfo(), required);
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (const auto &arg : args)
+ argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
+ return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
+ /*instanceMethod=*/false, chainCall,
+ argTypes, fnType->getExtInfo(), required);
}
/// Figure out the rules for calling a function with the given formal
@@ -407,8 +394,10 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
- const FunctionType *fnType) {
- return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 0);
+ const FunctionType *fnType,
+ bool chainCall) {
+ return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
+ chainCall ? 1 : 0, chainCall);
}
/// A block function call is essentially a free-function call with an
@@ -416,7 +405,8 @@ CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
const FunctionType *fnType) {
- return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1);
+ return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
+ /*chainCall=*/false);
}
const CGFunctionInfo &
@@ -426,11 +416,11 @@ CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
RequiredArgs required) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
- for (CallArgList::const_iterator i = args.begin(), e = args.end();
- i != e; ++i)
- argTypes.push_back(Context.getCanonicalParamType(i->Ty));
- return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
- info, required);
+ for (const auto &Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(resultType), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTypes, info, required);
}
/// Arrange a call to a C++ method, passing the given arguments.
@@ -440,13 +430,13 @@ CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
RequiredArgs required) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
- for (CallArgList::const_iterator i = args.begin(), e = args.end();
- i != e; ++i)
- argTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ for (const auto &Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
FunctionType::ExtInfo info = FPT->getExtInfo();
- return arrangeLLVMFunctionInfo(GetReturnType(FPT->getReturnType()), true,
- argTypes, info, required);
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
+ /*chainCall=*/false, argTypes, info, required);
}
const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
@@ -454,19 +444,20 @@ const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
const FunctionType::ExtInfo &info, bool isVariadic) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
- for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
- i != e; ++i)
- argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
+ for (auto Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));
RequiredArgs required =
(isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
- return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes, info,
- required);
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(resultType), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTypes, info, required);
}
const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
- return arrangeLLVMFunctionInfo(getContext().VoidTy, false, None,
- FunctionType::ExtInfo(), RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(
+ getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
+ None, FunctionType::ExtInfo(), RequiredArgs::All);
}
/// Arrange the argument and result information for an abstract value
@@ -474,22 +465,20 @@ const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
- bool IsInstanceMethod,
+ bool instanceMethod,
+ bool chainCall,
ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
RequiredArgs required) {
-#ifndef NDEBUG
- for (ArrayRef<CanQualType>::const_iterator
- I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
- assert(I->isCanonicalAsParam());
-#endif
+ assert(std::all_of(argTypes.begin(), argTypes.end(),
+ std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));
unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
- CGFunctionInfo::Profile(ID, IsInstanceMethod, info, required, resultType,
- argTypes);
+ CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
+ resultType, argTypes);
void *insertPos = nullptr;
CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
@@ -497,11 +486,12 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
return *FI;
// Construct the function info. We co-allocate the ArgInfos.
- FI = CGFunctionInfo::create(CC, IsInstanceMethod, info, resultType, argTypes,
- required);
+ FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
+ resultType, argTypes, required);
FunctionInfos.InsertNode(FI, insertPos);
- bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
+ bool inserted = FunctionsBeingProcessed.insert(FI).second;
+ (void)inserted;
assert(inserted && "Recursively being processed?");
// Compute ABI information.
@@ -525,7 +515,8 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
}
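The lookup-or-create flow above follows a common memoization shape: profile the signature, look it up, otherwise create and register it, with a set of in-flight entries guarding against recursive re-entry. A minimal plain-C++ analogue, assuming string keys instead of llvm::FoldingSetNodeID and none of clang's real types, is sketched below.

#include <cassert>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <vector>

struct FnInfo {
  std::string Ret;
  std::vector<std::string> Args;
};

class InfoCache {
  std::map<std::string, FnInfo> Cache;      // stands in for the FoldingSet
  std::set<const FnInfo *> BeingProcessed;  // recursion guard

  static std::string profile(const std::string &Ret,
                             const std::vector<std::string> &Args) {
    std::ostringstream ID;
    ID << Ret;
    for (const std::string &A : Args)
      ID << ',' << A;
    return ID.str();
  }

public:
  const FnInfo &arrange(const std::string &Ret,
                        const std::vector<std::string> &Args) {
    std::string ID = profile(Ret, Args);
    std::map<std::string, FnInfo>::iterator It = Cache.find(ID);
    if (It != Cache.end())
      return It->second;                    // cache hit: reuse the entry

    // Create and register the entry first, then do the expensive part,
    // asserting that the same signature is not already being processed.
    FnInfo &FI = Cache[ID];
    FI.Ret = Ret;
    FI.Args = Args;
    bool Inserted = BeingProcessed.insert(&FI).second;
    (void)Inserted;
    assert(Inserted && "Recursively being processed?");
    // ... the analogue of computing ABI information would go here ...
    BeingProcessed.erase(&FI);
    return FI;
  }
};

int main() {
  InfoCache C;
  const FnInfo &A = C.arrange("void", {"int", "int"});
  const FnInfo &B = C.arrange("void", {"int", "int"});
  assert(&A == &B && "identical signatures share one entry");
}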
CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
- bool IsInstanceMethod,
+ bool instanceMethod,
+ bool chainCall,
const FunctionType::ExtInfo &info,
CanQualType resultType,
ArrayRef<CanQualType> argTypes,
@@ -536,7 +527,8 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->CallingConvention = llvmCC;
FI->EffectiveCallingConvention = llvmCC;
FI->ASTCallingConvention = info.getCC();
- FI->InstanceMethod = IsInstanceMethod;
+ FI->InstanceMethod = instanceMethod;
+ FI->ChainCall = chainCall;
FI->NoReturn = info.getNoReturn();
FI->ReturnsRetained = info.getProducesResult();
FI->Required = required;
@@ -552,13 +544,79 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
/***/
-void CodeGenTypes::GetExpandedTypes(QualType type,
- SmallVectorImpl<llvm::Type*> &expandedTypes) {
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
- uint64_t NumElts = AT->getSize().getZExtValue();
- for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
- GetExpandedTypes(AT->getElementType(), expandedTypes);
- } else if (const RecordType *RT = type->getAs<RecordType>()) {
+namespace {
+// ABIArgInfo::Expand implementation.
+
+// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
+struct TypeExpansion {
+ enum TypeExpansionKind {
+ // Elements of constant arrays are expanded recursively.
+ TEK_ConstantArray,
+    // Record fields are expanded recursively (but if the record is a union,
+    // only the field with the largest size is expanded).
+ TEK_Record,
+ // For complex types, real and imaginary parts are expanded recursively.
+ TEK_Complex,
+ // All other types are not expandable.
+ TEK_None
+ };
+
+ const TypeExpansionKind Kind;
+
+ TypeExpansion(TypeExpansionKind K) : Kind(K) {}
+ virtual ~TypeExpansion() {}
+};
+
+struct ConstantArrayExpansion : TypeExpansion {
+ QualType EltTy;
+ uint64_t NumElts;
+
+ ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
+ : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_ConstantArray;
+ }
+};
+
+struct RecordExpansion : TypeExpansion {
+ SmallVector<const CXXBaseSpecifier *, 1> Bases;
+
+ SmallVector<const FieldDecl *, 1> Fields;
+
+ RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
+ SmallVector<const FieldDecl *, 1> &&Fields)
+ : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_Record;
+ }
+};
+
+struct ComplexExpansion : TypeExpansion {
+ QualType EltTy;
+
+ ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_Complex;
+ }
+};
+
+struct NoExpansion : TypeExpansion {
+ NoExpansion() : TypeExpansion(TEK_None) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_None;
+ }
+};
+} // namespace
+
+static std::unique_ptr<TypeExpansion>
+getTypeExpansion(QualType Ty, const ASTContext &Context) {
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ return llvm::make_unique<ConstantArrayExpansion>(
+ AT->getElementType(), AT->getSize().getZExtValue());
+ }
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ SmallVector<const CXXBaseSpecifier *, 1> Bases;
+ SmallVector<const FieldDecl *, 1> Fields;
const RecordDecl *RD = RT->getDecl();
assert(!RD->hasFlexibleArrayMember() &&
"Cannot expand structure with flexible array.");
@@ -569,88 +627,178 @@ void CodeGenTypes::GetExpandedTypes(QualType type,
CharUnits UnionSize = CharUnits::Zero();
for (const auto *FD : RD->fields()) {
+ // Skip zero length bitfields.
+ if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ continue;
assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
- CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
+ CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
if (UnionSize < FieldSize) {
UnionSize = FieldSize;
LargestFD = FD;
}
}
if (LargestFD)
- GetExpandedTypes(LargestFD->getType(), expandedTypes);
+ Fields.push_back(LargestFD);
} else {
- for (const auto *I : RD->fields()) {
- assert(!I->isBitField() &&
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ assert(!CXXRD->isDynamicClass() &&
+ "cannot expand vtable pointers in dynamic classes");
+ for (const CXXBaseSpecifier &BS : CXXRD->bases())
+ Bases.push_back(&BS);
+ }
+
+ for (const auto *FD : RD->fields()) {
+ // Skip zero length bitfields.
+ if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ continue;
+ assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
- GetExpandedTypes(I->getType(), expandedTypes);
+ Fields.push_back(FD);
}
}
- } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
- llvm::Type *EltTy = ConvertType(CT->getElementType());
- expandedTypes.push_back(EltTy);
- expandedTypes.push_back(EltTy);
- } else
- expandedTypes.push_back(ConvertType(type));
+ return llvm::make_unique<RecordExpansion>(std::move(Bases),
+ std::move(Fields));
+ }
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ return llvm::make_unique<ComplexExpansion>(CT->getElementType());
+ }
+ return llvm::make_unique<NoExpansion>();
}
-llvm::Function::arg_iterator
-CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
- llvm::Function::arg_iterator AI) {
- assert(LV.isSimple() &&
- "Unexpected non-simple lvalue during struct expansion.");
+static int getExpansionSize(QualType Ty, const ASTContext &Context) {
+ auto Exp = getTypeExpansion(Ty, Context);
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
+ }
+ if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ int Res = 0;
+ for (auto BS : RExp->Bases)
+ Res += getExpansionSize(BS->getType(), Context);
+ for (auto FD : RExp->Fields)
+ Res += getExpansionSize(FD->getType(), Context);
+ return Res;
+ }
+ if (isa<ComplexExpansion>(Exp.get()))
+ return 2;
+ assert(isa<NoExpansion>(Exp.get()));
+ return 1;
+}
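A rough standalone model of how the expansion kinds drive this size computation is sketched below. The Ty descriptor and its helpers are invented for the example (clang works on QualType and the TypeExpansion classes above); the counting rule is the same: arrays multiply, records sum their parts, complex counts as two, and anything else as one.

#include <cassert>
#include <memory>
#include <vector>

struct Ty;
typedef std::shared_ptr<Ty> TyRef;

// Invented, simplified type descriptor; only what the counting needs.
struct Ty {
  enum Kind { Scalar, Complex, Array, Record };
  Kind K;
  TyRef Elt;                 // array element type
  unsigned NumElts;          // array element count
  std::vector<TyRef> Fields; // record fields (bases would be handled alike)

  explicit Ty(Kind K) : K(K), NumElts(0) {}
};

static TyRef scalar() { return std::make_shared<Ty>(Ty::Scalar); }
static TyRef complexTy() { return std::make_shared<Ty>(Ty::Complex); }
static TyRef array(TyRef E, unsigned N) {
  TyRef T = std::make_shared<Ty>(Ty::Array);
  T->Elt = E;
  T->NumElts = N;
  return T;
}
static TyRef record(const std::vector<TyRef> &F) {
  TyRef T = std::make_shared<Ty>(Ty::Record);
  T->Fields = F;
  return T;
}

// Same counting rule as getExpansionSize above.
static int expansionSize(const TyRef &T) {
  switch (T->K) {
  case Ty::Array:    // arrays multiply
    return static_cast<int>(T->NumElts) * expansionSize(T->Elt);
  case Ty::Record: { // records sum their parts
    int Res = 0;
    for (const TyRef &F : T->Fields)
      Res += expansionSize(F);
    return Res;
  }
  case Ty::Complex:  // real and imaginary parts
    return 2;
  default:           // scalars and anything else
    return 1;
  }
}

int main() {
  // struct { int a[3]; _Complex double c; int b; } flattens to 3 + 2 + 1.
  TyRef S = record({array(scalar(), 3), complexTy(), scalar()});
  assert(expansionSize(S) == 6);
}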
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- unsigned NumElts = AT->getSize().getZExtValue();
- QualType EltTy = AT->getElementType();
- for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
- llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
- LValue LV = MakeAddrLValue(EltAddr, EltTy);
- AI = ExpandTypeFromArgs(EltTy, LV, AI);
+void
+CodeGenTypes::getExpandedTypes(QualType Ty,
+ SmallVectorImpl<llvm::Type *>::iterator &TI) {
+ auto Exp = getTypeExpansion(Ty, Context);
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ for (int i = 0, n = CAExp->NumElts; i < n; i++) {
+ getExpandedTypes(CAExp->EltTy, TI);
}
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- RecordDecl *RD = RT->getDecl();
- if (RD->isUnion()) {
- // Unions can be here only in degenerative cases - all the fields are same
- // after flattening. Thus we have to use the "largest" field.
- const FieldDecl *LargestFD = nullptr;
- CharUnits UnionSize = CharUnits::Zero();
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ for (auto BS : RExp->Bases)
+ getExpandedTypes(BS->getType(), TI);
+ for (auto FD : RExp->Fields)
+ getExpandedTypes(FD->getType(), TI);
+ } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
+ llvm::Type *EltTy = ConvertType(CExp->EltTy);
+ *TI++ = EltTy;
+ *TI++ = EltTy;
+ } else {
+ assert(isa<NoExpansion>(Exp.get()));
+ *TI++ = ConvertType(Ty);
+ }
+}
- for (const auto *FD : RD->fields()) {
- assert(!FD->isBitField() &&
- "Cannot expand structure with bit-field members.");
- CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
- if (UnionSize < FieldSize) {
- UnionSize = FieldSize;
- LargestFD = FD;
- }
- }
- if (LargestFD) {
- // FIXME: What are the right qualifiers here?
- LValue SubLV = EmitLValueForField(LV, LargestFD);
- AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
- }
- } else {
- for (const auto *FD : RD->fields()) {
- QualType FT = FD->getType();
+void CodeGenFunction::ExpandTypeFromArgs(
+ QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
- // FIXME: What are the right qualifiers here?
- LValue SubLV = EmitLValueForField(LV, FD);
- AI = ExpandTypeFromArgs(FT, SubLV, AI);
- }
+ auto Exp = getTypeExpansion(Ty, getContext());
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ for (int i = 0, n = CAExp->NumElts; i < n; i++) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, i);
+ LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
+ ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
+ }
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ llvm::Value *This = LV.getAddress();
+ for (const CXXBaseSpecifier *BS : RExp->Bases) {
+ // Perform a single step derived-to-base conversion.
+ llvm::Value *Base =
+ GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
+ /*NullCheckValue=*/false, SourceLocation());
+ LValue SubLV = MakeAddrLValue(Base, BS->getType());
+
+ // Recurse onto bases.
+ ExpandTypeFromArgs(BS->getType(), SubLV, AI);
}
- } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
- QualType EltTy = CT->getElementType();
+ for (auto FD : RExp->Fields) {
+ // FIXME: What are the right qualifiers here?
+ LValue SubLV = EmitLValueForField(LV, FD);
+ ExpandTypeFromArgs(FD->getType(), SubLV, AI);
+ }
+ } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
- EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
+ EmitStoreThroughLValue(RValue::get(*AI++),
+ MakeAddrLValue(RealAddr, CExp->EltTy));
llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
- EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
+ EmitStoreThroughLValue(RValue::get(*AI++),
+ MakeAddrLValue(ImagAddr, CExp->EltTy));
} else {
- EmitStoreThroughLValue(RValue::get(AI), LV);
- ++AI;
+ assert(isa<NoExpansion>(Exp.get()));
+ EmitStoreThroughLValue(RValue::get(*AI++), LV);
}
+}
+
+void CodeGenFunction::ExpandTypeToArgs(
+ QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
+ SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
+ auto Exp = getTypeExpansion(Ty, getContext());
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (int i = 0, n = CAExp->NumElts; i < n; i++) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, i);
+ RValue EltRV =
+ convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
+ ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
+ }
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ llvm::Value *This = RV.getAggregateAddr();
+ for (const CXXBaseSpecifier *BS : RExp->Bases) {
+ // Perform a single step derived-to-base conversion.
+ llvm::Value *Base =
+ GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
+ /*NullCheckValue=*/false, SourceLocation());
+ RValue BaseRV = RValue::getAggregate(Base);
+
+ // Recurse onto bases.
+ ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
+ IRCallArgPos);
+ }
+
+ LValue LV = MakeAddrLValue(This, Ty);
+ for (auto FD : RExp->Fields) {
+ RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
+ ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
+ IRCallArgPos);
+ }
+ } else if (isa<ComplexExpansion>(Exp.get())) {
+ ComplexPairTy CV = RV.getComplexVal();
+ IRCallArgs[IRCallArgPos++] = CV.first;
+ IRCallArgs[IRCallArgPos++] = CV.second;
+ } else {
+ assert(isa<NoExpansion>(Exp.get()));
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+
+ // Insert a bitcast as needed.
+ llvm::Value *V = RV.getScalarVal();
+ if (IRCallArgPos < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(IRCallArgPos))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
- return AI;
+ IRCallArgs[IRCallArgPos++] = V;
+ }
}
/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
@@ -667,11 +815,13 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
llvm::Type *FirstElt = SrcSTy->getElementType(0);
// If the first elt is at least as large as what we're looking for, or if the
- // first element is the same size as the whole struct, we can enter it.
+ // first element is the same size as the whole struct, we can enter it. The
+ // comparison must be made on the store size and not the alloca size. Using
+ // the alloca size may overstate the size of the load.
uint64_t FirstEltSize =
- CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
+ CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
if (FirstEltSize < DstSize &&
- FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
+ FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
return SrcPtr;
// GEP into the first element.
@@ -890,6 +1040,145 @@ static void CreateCoercedStore(llvm::Value *Src,
}
}
+namespace {
+
+/// Encapsulates information about the way function arguments from
+/// CGFunctionInfo should be passed to actual LLVM IR function.
+class ClangToLLVMArgMapping {
+ static const unsigned InvalidIndex = ~0U;
+ unsigned InallocaArgNo;
+ unsigned SRetArgNo;
+ unsigned TotalIRArgs;
+
+ /// Arguments of LLVM IR function corresponding to single Clang argument.
+ struct IRArgs {
+ unsigned PaddingArgIndex;
+ // Argument is expanded to IR arguments at positions
+ // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
+ unsigned FirstArgIndex;
+ unsigned NumberOfArgs;
+
+ IRArgs()
+ : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
+ NumberOfArgs(0) {}
+ };
+
+ SmallVector<IRArgs, 8> ArgInfo;
+
+public:
+ ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs = false)
+ : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
+ ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
+ construct(Context, FI, OnlyRequiredArgs);
+ }
+
+ bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
+ unsigned getInallocaArgNo() const {
+ assert(hasInallocaArg());
+ return InallocaArgNo;
+ }
+
+ bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
+ unsigned getSRetArgNo() const {
+ assert(hasSRetArg());
+ return SRetArgNo;
+ }
+
+ unsigned totalIRArgs() const { return TotalIRArgs; }
+
+ bool hasPaddingArg(unsigned ArgNo) const {
+ assert(ArgNo < ArgInfo.size());
+ return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
+ }
+ unsigned getPaddingArgNo(unsigned ArgNo) const {
+ assert(hasPaddingArg(ArgNo));
+ return ArgInfo[ArgNo].PaddingArgIndex;
+ }
+
+ /// Returns index of first IR argument corresponding to ArgNo, and their
+ /// quantity.
+ std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
+ assert(ArgNo < ArgInfo.size());
+ return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
+ ArgInfo[ArgNo].NumberOfArgs);
+ }
+
+private:
+ void construct(const ASTContext &Context, const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs);
+};
+
+void ClangToLLVMArgMapping::construct(const ASTContext &Context,
+ const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs) {
+ unsigned IRArgNo = 0;
+ bool SwapThisWithSRet = false;
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+ if (RetAI.getKind() == ABIArgInfo::Indirect) {
+ SwapThisWithSRet = RetAI.isSRetAfterThis();
+ SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
+ }
+
+ unsigned ArgNo = 0;
+ unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
+ for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
+ ++I, ++ArgNo) {
+ assert(I != FI.arg_end());
+ QualType ArgType = I->type;
+ const ABIArgInfo &AI = I->info;
+ // Collect data about IR arguments corresponding to Clang argument ArgNo.
+ auto &IRArgs = ArgInfo[ArgNo];
+
+ if (AI.getPaddingType())
+ IRArgs.PaddingArgIndex = IRArgNo++;
+
+ switch (AI.getKind()) {
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // FIXME: handle sseregparm someday...
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
+ if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
+ IRArgs.NumberOfArgs = STy->getNumElements();
+ } else {
+ IRArgs.NumberOfArgs = 1;
+ }
+ break;
+ }
+ case ABIArgInfo::Indirect:
+ IRArgs.NumberOfArgs = 1;
+ break;
+ case ABIArgInfo::Ignore:
+ case ABIArgInfo::InAlloca:
+      // ignore and inalloca don't have matching LLVM parameters.
+ IRArgs.NumberOfArgs = 0;
+ break;
+ case ABIArgInfo::Expand: {
+ IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
+ break;
+ }
+ }
+
+ if (IRArgs.NumberOfArgs > 0) {
+ IRArgs.FirstArgIndex = IRArgNo;
+ IRArgNo += IRArgs.NumberOfArgs;
+ }
+
+ // Skip over the sret parameter when it comes second. We already handled it
+ // above.
+ if (IRArgNo == 1 && SwapThisWithSRet)
+ IRArgNo++;
+ }
+ assert(ArgNo == ArgInfo.size());
+
+ if (FI.usesInAlloca())
+ InallocaArgNo = IRArgNo++;
+
+ TotalIRArgs = IRArgNo;
+}
+} // namespace
+
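What ClangToLLVMArgMapping computes can be summarized as assigning each Clang-level argument a contiguous range of IR argument indices, with optional sret and inalloca slots around them. The sketch below is a simplified standalone model under stated assumptions (no padding arguments, sret always first, invented names), not the real class.

#include <cassert>
#include <vector>

const unsigned InvalidIndex = ~0U;

// For each Clang-level argument: [FirstIRArg, FirstIRArg + NumIRArgs).
struct ArgRange {
  unsigned FirstIRArg;
  unsigned NumIRArgs;
};

struct Mapping {
  unsigned SRetArgNo;
  unsigned InallocaArgNo;
  std::vector<ArgRange> Args;
  unsigned TotalIRArgs;
  Mapping()
      : SRetArgNo(InvalidIndex), InallocaArgNo(InvalidIndex), TotalIRArgs(0) {}
};

// NumIRArgsPerClangArg[i] is how many IR arguments Clang argument i needs:
// 0 for Ignore/InAlloca, N for a flattened struct or an expanded type,
// and 1 otherwise. Padding arguments are left out of this model.
static Mapping buildMapping(const std::vector<unsigned> &NumIRArgsPerClangArg,
                            bool HasSRet, bool HasInalloca) {
  Mapping M;
  unsigned IRArgNo = 0;
  if (HasSRet)
    M.SRetArgNo = IRArgNo++;     // sret pointer first (ignoring sret-after-this)
  for (unsigned N : NumIRArgsPerClangArg) {
    ArgRange R;
    R.FirstIRArg = N ? IRArgNo : InvalidIndex;
    R.NumIRArgs = N;
    M.Args.push_back(R);
    IRArgNo += N;
  }
  if (HasInalloca)
    M.InallocaArgNo = IRArgNo++; // inalloca struct pointer goes last
  M.TotalIRArgs = IRArgNo;
  return M;
}

int main() {
  // Three Clang args: a scalar, a struct flattened to two scalars, an Ignore.
  Mapping M = buildMapping({1, 2, 0}, /*HasSRet=*/true, /*HasInalloca=*/false);
  assert(M.SRetArgNo == 0);
  assert(M.Args[0].FirstIRArg == 1 && M.Args[0].NumIRArgs == 1);
  assert(M.Args[1].FirstIRArg == 2 && M.Args[1].NumIRArgs == 2);
  assert(M.Args[2].NumIRArgs == 0);
  assert(M.TotalIRArgs == 4);
}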
/***/
bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
@@ -936,14 +1225,12 @@ llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
-
- bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
+
+ bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
+ (void)Inserted;
assert(Inserted && "Recursively being processed?");
-
- bool SwapThisWithSRet = false;
- SmallVector<llvm::Type*, 8> argTypes;
- llvm::Type *resultType = nullptr;
+ llvm::Type *resultType = nullptr;
const ABIArgInfo &retAI = FI.getReturnInfo();
switch (retAI.getKind()) {
case ABIArgInfo::Expand:
@@ -969,13 +1256,6 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
case ABIArgInfo::Indirect: {
assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
resultType = llvm::Type::getVoidTy(getLLVMContext());
-
- QualType ret = FI.getReturnType();
- llvm::Type *ty = ConvertType(ret);
- unsigned addressSpace = Context.getTargetAddressSpace(ret);
- argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
-
- SwapThisWithSRet = retAI.isSRetAfterThis();
break;
}
@@ -984,67 +1264,83 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
break;
}
- // Add in all of the required arguments.
- CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
- if (FI.isVariadic()) {
- ie = it + FI.getRequiredArgs().getNumRequiredArgs();
- } else {
- ie = FI.arg_end();
+ ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
+ SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
+
+ // Add type for sret argument.
+ if (IRFunctionArgs.hasSRetArg()) {
+ QualType Ret = FI.getReturnType();
+ llvm::Type *Ty = ConvertType(Ret);
+ unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
+ ArgTypes[IRFunctionArgs.getSRetArgNo()] =
+ llvm::PointerType::get(Ty, AddressSpace);
+ }
+
+ // Add type for inalloca argument.
+ if (IRFunctionArgs.hasInallocaArg()) {
+ auto ArgStruct = FI.getArgStruct();
+ assert(ArgStruct);
+ ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
}
- for (; it != ie; ++it) {
- const ABIArgInfo &argAI = it->info;
+
+ // Add in all of the required arguments.
+ unsigned ArgNo = 0;
+ CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = it + FI.getNumRequiredArgs();
+ for (; it != ie; ++it, ++ArgNo) {
+ const ABIArgInfo &ArgInfo = it->info;
// Insert a padding type to ensure proper alignment.
- if (llvm::Type *PaddingType = argAI.getPaddingType())
- argTypes.push_back(PaddingType);
+ if (IRFunctionArgs.hasPaddingArg(ArgNo))
+ ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
+ ArgInfo.getPaddingType();
+
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
- switch (argAI.getKind()) {
+ switch (ArgInfo.getKind()) {
case ABIArgInfo::Ignore:
case ABIArgInfo::InAlloca:
+ assert(NumIRArgs == 0);
break;
case ABIArgInfo::Indirect: {
+ assert(NumIRArgs == 1);
// indirect arguments are always on the stack, which is addr space #0.
llvm::Type *LTy = ConvertTypeForMem(it->type);
- argTypes.push_back(LTy->getPointerTo());
+ ArgTypes[FirstIRArg] = LTy->getPointerTo();
break;
}
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- // If the coerce-to type is a first class aggregate, flatten it. Either
- // way is semantically identical, but fast-isel and the optimizer
- // generally likes scalar values better than FCAs.
- // We cannot do this for functions using the AAPCS calling convention,
- // as structures are treated differently by that calling convention.
- llvm::Type *argType = argAI.getCoerceToType();
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
+ llvm::Type *argType = ArgInfo.getCoerceToType();
llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
- if (st && !isAAPCSVFP(FI, getTarget())) {
+ if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
+ assert(NumIRArgs == st->getNumElements());
for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
- argTypes.push_back(st->getElementType(i));
+ ArgTypes[FirstIRArg + i] = st->getElementType(i);
} else {
- argTypes.push_back(argType);
+ assert(NumIRArgs == 1);
+ ArgTypes[FirstIRArg] = argType;
}
break;
}
case ABIArgInfo::Expand:
- GetExpandedTypes(it->type, argTypes);
+ auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
+ getExpandedTypes(it->type, ArgTypesIter);
+ assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
break;
}
}
- // Add the inalloca struct as the last parameter type.
- if (llvm::StructType *ArgStruct = FI.getArgStruct())
- argTypes.push_back(ArgStruct->getPointerTo());
-
- if (SwapThisWithSRet)
- std::swap(argTypes[0], argTypes[1]);
-
bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
assert(Erased && "Not in set?");
-
- return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
+
+ return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
@@ -1056,7 +1352,8 @@ llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CGFunctionInfo *Info;
if (isa<CXXDestructorDecl>(MD))
- Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ Info =
+ &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
else
Info = &arrangeCXXMethodDeclaration(MD);
return GetFunctionType(*Info);
@@ -1069,6 +1366,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
bool AttrOnCallSite) {
llvm::AttrBuilder FuncAttrs;
llvm::AttrBuilder RetAttrs;
+ bool HasOptnone = false;
CallingConv = FI.getEffectiveCallingConvention();
@@ -1109,12 +1407,18 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
RetAttrs.addAttribute(llvm::Attribute::NoAlias);
if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
RetAttrs.addAttribute(llvm::Attribute::NonNull);
+
+ HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
+ }
+
+ // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
+ if (!HasOptnone) {
+ if (CodeGenOpts.OptimizeSize)
+ FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
+ if (CodeGenOpts.OptimizeSize == 2)
+ FuncAttrs.addAttribute(llvm::Attribute::MinSize);
}
- if (CodeGenOpts.OptimizeSize)
- FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
- if (CodeGenOpts.OptimizeSize == 2)
- FuncAttrs.addAttribute(llvm::Attribute::MinSize);
if (CodeGenOpts.DisableRedZone)
FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
if (CodeGenOpts.NoImplicitFloat)
@@ -1156,9 +1460,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
FuncAttrs.addAttribute("no-realign-stack");
}
+ ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
+
QualType RetTy = FI.getReturnType();
- unsigned Index = 1;
- bool SwapThisWithSRet = false;
const ABIArgInfo &RetAI = FI.getReturnInfo();
switch (RetAI.getKind()) {
case ABIArgInfo::Extend:
@@ -1174,25 +1478,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
case ABIArgInfo::Ignore:
break;
- case ABIArgInfo::InAlloca: {
- // inalloca disables readnone and readonly
- FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
- .removeAttribute(llvm::Attribute::ReadNone);
- break;
- }
-
+ case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
- llvm::AttrBuilder SRETAttrs;
- SRETAttrs.addAttribute(llvm::Attribute::StructRet);
- if (RetAI.getInReg())
- SRETAttrs.addAttribute(llvm::Attribute::InReg);
- SwapThisWithSRet = RetAI.isSRetAfterThis();
- PAL.push_back(llvm::AttributeSet::get(
- getLLVMContext(), SwapThisWithSRet ? 2 : Index, SRETAttrs));
-
- if (!SwapThisWithSRet)
- ++Index;
- // sret disables readnone and readonly
+ // inalloca and sret disable readnone and readonly
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
.removeAttribute(llvm::Attribute::ReadNone);
break;
@@ -1211,28 +1499,44 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
RetAttrs.addAttribute(llvm::Attribute::NonNull);
}
- if (RetAttrs.hasAttributes())
- PAL.push_back(llvm::
- AttributeSet::get(getLLVMContext(),
- llvm::AttributeSet::ReturnIndex,
- RetAttrs));
+ // Attach return attributes.
+ if (RetAttrs.hasAttributes()) {
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
+ }
- for (const auto &I : FI.arguments()) {
- QualType ParamType = I.type;
- const ABIArgInfo &AI = I.info;
+ // Attach attributes to sret.
+ if (IRFunctionArgs.hasSRetArg()) {
+ llvm::AttrBuilder SRETAttrs;
+ SRETAttrs.addAttribute(llvm::Attribute::StructRet);
+ if (RetAI.getInReg())
+ SRETAttrs.addAttribute(llvm::Attribute::InReg);
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
+ }
+
+ // Attach attributes to inalloca argument.
+ if (IRFunctionArgs.hasInallocaArg()) {
llvm::AttrBuilder Attrs;
+ Attrs.addAttribute(llvm::Attribute::InAlloca);
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
+ }
- // Skip over the sret parameter when it comes second. We already handled it
- // above.
- if (Index == 2 && SwapThisWithSRet)
- ++Index;
+ unsigned ArgNo = 0;
+ for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
+ E = FI.arg_end();
+ I != E; ++I, ++ArgNo) {
+ QualType ParamType = I->type;
+ const ABIArgInfo &AI = I->info;
+ llvm::AttrBuilder Attrs;
- if (AI.getPaddingType()) {
+ // Add attribute for padding argument, if necessary.
+ if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
if (AI.getPaddingInReg())
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
- llvm::Attribute::InReg));
- // Increment Index if there is padding.
- ++Index;
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
+ llvm::Attribute::InReg));
}
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
@@ -1245,24 +1549,13 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
else if (ParamType->isUnsignedIntegerOrEnumerationType())
Attrs.addAttribute(llvm::Attribute::ZExt);
// FALL THROUGH
- case ABIArgInfo::Direct: {
- if (AI.getInReg())
+ case ABIArgInfo::Direct:
+ if (ArgNo == 0 && FI.isChainCall())
+ Attrs.addAttribute(llvm::Attribute::Nest);
+ else if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
-
- // FIXME: handle sseregparm someday...
-
- llvm::StructType *STy =
- dyn_cast<llvm::StructType>(AI.getCoerceToType());
- if (!isAAPCSVFP(FI, getTarget()) && STy) {
- unsigned Extra = STy->getNumElements()-1; // 1 will be added below.
- if (Attrs.hasAttributes())
- for (unsigned I = 0; I < Extra; ++I)
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
- Attrs));
- Index += Extra;
- }
break;
- }
+
case ABIArgInfo::Indirect:
if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
@@ -1278,26 +1571,15 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
break;
case ABIArgInfo::Ignore:
- // Skip increment, no matching LLVM parameter.
+ case ABIArgInfo::Expand:
continue;
case ABIArgInfo::InAlloca:
// inalloca disables readnone and readonly.
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
.removeAttribute(llvm::Attribute::ReadNone);
- // Skip increment, no matching LLVM parameter.
- continue;
-
- case ABIArgInfo::Expand: {
- SmallVector<llvm::Type*, 8> types;
- // FIXME: This is rather inefficient. Do we ever actually need to do
- // anything here? The result should be just reconstructed on the other
- // side, so extension should be a non-issue.
- getTypes().GetExpandedTypes(ParamType, types);
- Index += types.size();
continue;
}
- }
if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
QualType PTy = RefTy->getPointeeType();
@@ -1308,17 +1590,15 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
Attrs.addAttribute(llvm::Attribute::NonNull);
}
- if (Attrs.hasAttributes())
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
- ++Index;
- }
-
- // Add the inalloca attribute to the trailing inalloca parameter if present.
- if (FI.usesInAlloca()) {
- llvm::AttrBuilder Attrs;
- Attrs.addAttribute(llvm::Attribute::InAlloca);
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
+ if (Attrs.hasAttributes()) {
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
+ for (unsigned i = 0; i < NumIRArgs; i++)
+ PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
+ FirstIRArg + i + 1, Attrs));
+ }
}
+ assert(ArgNo == FI.arg_size());
if (FuncAttrs.hasAttributes())
PAL.push_back(llvm::
@@ -1347,9 +1627,41 @@ static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
+/// Returns the attribute (either parameter attribute, or function
+/// attribute), which declares argument ArgNo to be non-null.
+static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
+ QualType ArgType, unsigned ArgNo) {
+ // FIXME: __attribute__((nonnull)) can also be applied to:
+ // - references to pointers, where the pointee is known to be
+ // nonnull (apparently a Clang extension)
+ // - transparent unions containing pointers
+ // In the former case, LLVM IR cannot represent the constraint. In
+ // the latter case, we have no guarantee that the transparent union
+ // is in fact passed as a pointer.
+ if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
+ return nullptr;
+ // First, check attribute on parameter itself.
+ if (PVD) {
+ if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
+ return ParmNNAttr;
+ }
+ // Check function attributes.
+ if (!FD)
+ return nullptr;
+ for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
+ if (NNAttr->isNonNull(ArgNo))
+ return NNAttr;
+ }
+ return nullptr;
+}
+
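The precedence implemented by getNonNullAttr is: a nonnull attribute on the parameter itself wins, otherwise the function-level nonnull attribute is consulted, and only pointer-like arguments qualify at all. A small standalone model of that decision, with invented data structures in place of clang's AST, is sketched below.

#include <cassert>
#include <set>
#include <vector>

struct Param {
  bool IsPointer;
  bool HasNonNullAttr;            // __attribute__((nonnull)) on the parameter
};

struct Function {
  std::vector<Param> Params;
  bool HasFnNonNull;              // function-level __attribute__((nonnull(...)))
  std::set<unsigned> NonNullArgs; // empty set means "all pointer arguments"
  Function() : HasFnNonNull(false) {}
};

// Parameter-level attribute first, then the function-level attribute;
// non-pointer arguments never qualify.
static bool isDeclaredNonNull(const Function &F, unsigned ArgNo) {
  const Param &P = F.Params.at(ArgNo);
  if (!P.IsPointer)
    return false;
  if (P.HasNonNullAttr)
    return true;
  if (!F.HasFnNonNull)
    return false;
  return F.NonNullArgs.empty() || F.NonNullArgs.count(ArgNo) != 0;
}

int main() {
  Function F;
  Param P0 = {true, false}, P1 = {true, true}, P2 = {false, false};
  F.Params.push_back(P0);
  F.Params.push_back(P1);
  F.Params.push_back(P2);
  F.HasFnNonNull = true;
  F.NonNullArgs.insert(0);          // nonnull(1) in source; indices here are 0-based
  assert(isDeclaredNonNull(F, 0));  // from the function attribute
  assert(isDeclaredNonNull(F, 1));  // from the parameter attribute
  assert(!isDeclaredNonNull(F, 2)); // not a pointer
}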
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Function *Fn,
const FunctionArgList &Args) {
+ if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
+ // Naked functions don't have prologues.
+ return;
+
// If this is an implicit-return-zero function, go ahead and
// initialize the return value. TODO: it might be nice to have
// a more general mechanism for this that didn't require synthesized
@@ -1366,39 +1678,31 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// FIXME: We no longer need the types from FunctionArgList; lift up and
// simplify.
- // Emit allocs for param decls. Give the LLVM Argument nodes names.
- llvm::Function::arg_iterator AI = Fn->arg_begin();
+ ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
+ // Flattened function arguments.
+ SmallVector<llvm::Argument *, 16> FnArgs;
+ FnArgs.reserve(IRFunctionArgs.totalIRArgs());
+ for (auto &Arg : Fn->args()) {
+ FnArgs.push_back(&Arg);
+ }
+ assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
llvm::Value *ArgStruct = nullptr;
- if (FI.usesInAlloca()) {
- llvm::Function::arg_iterator EI = Fn->arg_end();
- --EI;
- ArgStruct = EI;
+ if (IRFunctionArgs.hasInallocaArg()) {
+ ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
}
- // Name the struct return parameter, which can come first or second.
- const ABIArgInfo &RetAI = FI.getReturnInfo();
- bool SwapThisWithSRet = false;
- if (RetAI.isIndirect()) {
- SwapThisWithSRet = RetAI.isSRetAfterThis();
- if (SwapThisWithSRet)
- ++AI;
+ // Name the struct return parameter.
+ if (IRFunctionArgs.hasSRetArg()) {
+ auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
AI->setName("agg.result");
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
llvm::Attribute::NoAlias));
- if (SwapThisWithSRet)
- --AI; // Go back to the beginning for 'this'.
- else
- ++AI; // Skip the sret parameter.
}
- // Get the function-level nonnull attribute if it exists.
- const NonNullAttr *NNAtt =
- CurCodeDecl ? CurCodeDecl->getAttr<NonNullAttr>() : nullptr;
-
// Track if we received the parameter as a pointer (indirect, byval, or
// inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it
// into a local alloca for us.
@@ -1413,9 +1717,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// we can push the cleanups in the correct order for the ABI.
assert(FI.arg_size() == Args.size() &&
"Mismatch between function signature & arguments.");
- unsigned ArgNo = 1;
+ unsigned ArgNo = 0;
CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
- for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i, ++info_it, ++ArgNo) {
const VarDecl *Arg = *i;
QualType Ty = info_it->type;
@@ -1424,20 +1728,21 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
bool isPromoted =
isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
- // Skip the dummy padding argument.
- if (ArgI.getPaddingType())
- ++AI;
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
switch (ArgI.getKind()) {
case ABIArgInfo::InAlloca: {
+ assert(NumIRArgs == 0);
llvm::Value *V = Builder.CreateStructGEP(
ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName());
ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
- continue; // Don't increment AI!
+ break;
}
case ABIArgInfo::Indirect: {
- llvm::Value *V = AI;
+ assert(NumIRArgs == 1);
+ llvm::Value *V = FnArgs[FirstIRArg];
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
@@ -1483,12 +1788,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
ArgI.getCoerceToType() == ConvertType(Ty) &&
ArgI.getDirectOffset() == 0) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ assert(NumIRArgs == 1);
+ auto AI = FnArgs[FirstIRArg];
llvm::Value *V = AI;
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
- if ((NNAtt && NNAtt->isNonNull(PVD->getFunctionScopeIndex())) ||
- PVD->hasAttr<NonNullAttr>())
+ if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
+ PVD->getFunctionScopeIndex()))
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
AI->getArgNo() + 1,
llvm::Attribute::NonNull));
@@ -1527,6 +1833,25 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
AI->getArgNo() + 1,
llvm::Attribute::NonNull));
}
+
+ const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
+ if (!AVAttr)
+ if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
+ AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
+ if (AVAttr) {
+ llvm::Value *AlignmentValue =
+ EmitScalarExpr(AVAttr->getAlignment());
+ llvm::ConstantInt *AlignmentCI =
+ cast<llvm::ConstantInt>(AlignmentValue);
+ unsigned Alignment =
+ std::min((unsigned) AlignmentCI->getZExtValue(),
+ +llvm::Value::MaximumAlignment);
+
+ llvm::AttrBuilder Attrs;
+ Attrs.addAlignmentAttr(Alignment);
+ AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
+ AI->getArgNo() + 1, Attrs));
+ }
}
if (Arg->getType().isRestrictQualified())
@@ -1581,13 +1906,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
}
- // If the coerce-to type is a first class aggregate, we flatten it and
- // pass the elements. Either way is semantically identical, but fast-isel
- // and the optimizer generally likes scalar values better than FCAs.
- // We cannot do this for functions using the AAPCS calling convention,
- // as structures are treated differently by that calling convention.
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
- if (!isAAPCSVFP(FI, getTarget()) && STy && STy->getNumElements() > 1) {
+ if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
+ STy->getNumElements() > 1) {
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
llvm::Type *DstTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
@@ -1596,11 +1919,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (SrcSize <= DstSize) {
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+ assert(STy->getNumElements() == NumIRArgs);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ auto AI = FnArgs[FirstIRArg + i];
AI->setName(Arg->getName() + ".coerce" + Twine(i));
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
- Builder.CreateStore(AI++, EltPtr);
+ Builder.CreateStore(AI, EltPtr);
}
} else {
llvm::AllocaInst *TempAlloca =
@@ -1608,20 +1932,22 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
TempAlloca->setAlignment(AlignmentToUse);
llvm::Value *TempV = TempAlloca;
+ assert(STy->getNumElements() == NumIRArgs);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ auto AI = FnArgs[FirstIRArg + i];
AI->setName(Arg->getName() + ".coerce" + Twine(i));
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
- Builder.CreateStore(AI++, EltPtr);
+ Builder.CreateStore(AI, EltPtr);
}
Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
}
} else {
// Simple case, just do a coerced store of the argument into the alloca.
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ assert(NumIRArgs == 1);
+ auto AI = FnArgs[FirstIRArg];
AI->setName(Arg->getName() + ".coerce");
- CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
+ CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
}
@@ -1634,7 +1960,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
} else {
ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
}
- continue; // Skip ++AI increment, already done.
+ break;
}
case ABIArgInfo::Expand: {
@@ -1645,17 +1971,20 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
CharUnits Align = getContext().getDeclAlign(Arg);
Alloca->setAlignment(Align.getQuantity());
LValue LV = MakeAddrLValue(Alloca, Ty, Align);
- llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));
- // Name the arguments used in expansion and increment AI.
- unsigned Index = 0;
- for (; AI != End; ++AI, ++Index)
- AI->setName(Arg->getName() + "." + Twine(Index));
- continue;
+ auto FnArgIter = FnArgs.begin() + FirstIRArg;
+ ExpandTypeFromArgs(Ty, LV, FnArgIter);
+ assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
+ for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
+ auto AI = FnArgs[FirstIRArg + i];
+ AI->setName(Arg->getName() + "." + Twine(i));
+ }
+ break;
}
case ABIArgInfo::Ignore:
+ assert(NumIRArgs == 0);
// Initialize the local variable appropriately.
if (!hasScalarEvaluationKind(Ty)) {
ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
@@ -1663,21 +1992,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
}
-
- // Skip increment, no matching LLVM parameter.
- continue;
+ break;
}
-
- ++AI;
-
- if (ArgNo == 1 && SwapThisWithSRet)
- ++AI; // Skip the sret parameter.
}
- if (FI.usesInAlloca())
- ++AI;
- assert(AI == Fn->arg_end() && "Argument mismatch!");
-
if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
for (int I = Args.size() - 1; I >= 0; --I)
EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
@@ -1887,6 +2205,12 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
bool EmitRetDbgLoc,
SourceLocation EndLoc) {
+ if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
+ // Naked functions don't have epilogues.
+ Builder.CreateUnreachable();
+ return;
+ }
+
// Functions with no result always return void.
if (!ReturnValue) {
Builder.CreateRetVoid();
@@ -1998,7 +2322,26 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
llvm_unreachable("Invalid ABI kind for return argument");
}
- llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
+ llvm::Instruction *Ret;
+ if (RV) {
+ if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
+ if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) {
+ SanitizerScope SanScope(this);
+ llvm::Value *Cond = Builder.CreateICmpNE(
+ RV, llvm::Constant::getNullValue(RV->getType()));
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(EndLoc),
+ EmitCheckSourceLocation(RetNNAttr->getLocation()),
+ };
+ EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
+ "nonnull_return", StaticData, None);
+ }
+ }
+ Ret = Builder.CreateRet(RV);
+ } else {
+ Ret = Builder.CreateRetVoid();
+ }
+
if (!RetDbgLoc.isUnknown())
Ret->setDebugLoc(RetDbgLoc);
}
@@ -2045,19 +2388,8 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
return args.add(RValue::get(Builder.CreateLoad(local)), type);
}
- if (isInAllocaArgument(CGM.getCXXABI(), type)) {
- AggValueSlot Slot = createPlaceholderSlot(*this, type);
- Slot.setExternallyDestructed();
-
- // FIXME: Either emit a copy constructor call, or figure out how to do
- // guaranteed tail calls with perfect forwarding in LLVM.
- CGM.ErrorUnsupported(param, "non-trivial argument copy for thunk");
- EmitNullInitialization(Slot.getAddr(), type);
-
- RValue RV = Slot.asRValue();
- args.add(RV, type);
- return;
- }
+ assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
+ "cannot emit delegate call arguments for inalloca arguments!");
args.add(convertTempToRValue(local, type, loc), type);
}
@@ -2317,10 +2649,36 @@ void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
}
}
+static void emitNonNullArgCheck(CodeGenFunction &CGF, RValue RV,
+ QualType ArgType, SourceLocation ArgLoc,
+ const FunctionDecl *FD, unsigned ParmNum) {
+ if (!CGF.SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
+ return;
+ auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
+ unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
+ auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
+ if (!NNAttr)
+ return;
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ assert(RV.isScalar());
+ llvm::Value *V = RV.getScalarVal();
+ llvm::Value *Cond =
+ CGF.Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
+ llvm::Constant *StaticData[] = {
+ CGF.EmitCheckSourceLocation(ArgLoc),
+ CGF.EmitCheckSourceLocation(NNAttr->getLocation()),
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgNo + 1),
+ };
+ CGF.EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
+ "nonnull_arg", StaticData, None);
+}
+
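At the source level, the effect of emitNonNullArgCheck is roughly: before the call, compare the pointer argument against null and report which argument position violated which attribute. The following is a hand-rolled approximation of that idea, not the UBSan runtime or its ABI; the file/line strings are made up for the example.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hand-rolled stand-in for the check emitted under
// -fsanitize=nonnull-attribute: verify the pointer before making the call.
static void checkNonNullArg(const void *Arg, unsigned ArgNo,
                            const char *CallSite, const char *AttrSite) {
  if (Arg)
    return;
  std::fprintf(stderr,
               "%s: null passed to argument %u, declared nonnull at %s\n",
               CallSite, ArgNo, AttrSite);
  std::abort();
}

// GNU attribute, honored by clang and gcc; argument indices are 1-based.
__attribute__((nonnull(1))) static std::size_t myStrlen(const char *S) {
  std::size_t N = 0;
  while (S[N])
    ++N;
  return N;
}

int main() {
  const char *Msg = "hello";
  // The compiler would emit the equivalent of this check automatically at
  // the call site; the location strings here are invented.
  checkNonNullArg(Msg, 1, "example.cpp:29", "example.cpp:20");
  return static_cast<int>(myStrlen(Msg));
}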
void CodeGenFunction::EmitCallArgs(CallArgList &Args,
ArrayRef<QualType> ArgTypes,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
+ const FunctionDecl *CalleeDecl,
+ unsigned ParamsToSkip,
bool ForceColumnInfo) {
CGDebugInfo *DI = getDebugInfo();
SourceLocation CallLoc;
@@ -2344,6 +2702,8 @@ void CodeGenFunction::EmitCallArgs(CallArgList &Args,
for (int I = ArgTypes.size() - 1; I >= 0; --I) {
CallExpr::const_arg_iterator Arg = ArgBeg + I;
EmitCallArg(Args, *Arg, ArgTypes[I]);
+ emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
+ CalleeDecl, ParamsToSkip + I);
// Restore the debug location.
if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
}
@@ -2358,6 +2718,8 @@ void CodeGenFunction::EmitCallArgs(CallArgList &Args,
CallExpr::const_arg_iterator Arg = ArgBeg + I;
assert(Arg != ArgEnd);
EmitCallArg(Args, *Arg, ArgTypes[I]);
+ emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
+ CalleeDecl, ParamsToSkip + I);
// Restore the debug location.
if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
}
@@ -2457,6 +2819,24 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
args.add(EmitAnyExprToTemp(E), type);
}
+QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
+ // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
+ // implicitly widens null pointer constants that are arguments to varargs
+ // functions to pointer-sized ints.
+ if (!getTarget().getTriple().isOSWindows())
+ return Arg->getType();
+
+ if (Arg->getType()->isIntegerType() &&
+ getContext().getTypeSize(Arg->getType()) <
+ getContext().getTargetInfo().getPointerWidth(0) &&
+ Arg->isNullPointerConstant(getContext(),
+ Expr::NPC_ValueDependentIsNotNull)) {
+ return getContext().getIntPtrType();
+ }
+
+ return Arg->getType();
+}
+
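The rule in getVarArgType can be restated as a small pure function: on Windows targets, an integer-typed null pointer constant narrower than a pointer is given pointer-sized integer type when passed through varargs, and everything else keeps its written type. A simplified model with invented field names:

#include <cassert>

// Simplified model of the decision in getVarArgType.
struct Arg {
  bool IsIntegerType;
  unsigned TypeBits;          // size of the argument's type, in bits
  bool IsNullPointerConstant; // e.g. a literal 0 spelled NULL
};

enum class VarArgType { AsWritten, IntPtr };

static VarArgType classifyVarArg(const Arg &A, bool TargetIsWindows,
                                 unsigned PointerBits) {
  if (!TargetIsWindows)
    return VarArgType::AsWritten;
  if (A.IsIntegerType && A.TypeBits < PointerBits && A.IsNullPointerConstant)
    return VarArgType::IntPtr; // widen NULL (plain 0) to a pointer-sized int
  return VarArgType::AsWritten;
}

int main() {
  Arg NullConstant = {true, 32, true}; // NULL defined as plain 0
  Arg PlainInt = {true, 32, false};
  // On Win64, printf("%p", NULL) must still receive 64 bits.
  assert(classifyVarArg(NullConstant, true, 64) == VarArgType::IntPtr);
  assert(classifyVarArg(PlainInt, true, 64) == VarArgType::AsWritten);
  assert(classifyVarArg(NullConstant, false, 64) == VarArgType::AsWritten);
}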
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
@@ -2471,7 +2851,7 @@ CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
const llvm::Twine &name) {
- return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
+ return EmitNounwindRuntimeCall(callee, None, name);
}
/// Emits a call to the given nounwind runtime function.
@@ -2489,7 +2869,7 @@ CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
const llvm::Twine &name) {
- return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
+ return EmitRuntimeCall(callee, None, name);
}
/// Emits a simple call (never an invoke) to the given runtime
@@ -2528,7 +2908,7 @@ void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
const Twine &name) {
- return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
+ return EmitRuntimeCallOrInvoke(callee, None, name);
}
/// Emits a call or invoke instruction to the given runtime function.
@@ -2544,7 +2924,7 @@ CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
const Twine &Name) {
- return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
+ return EmitCallOrInvoke(Callee, None, Name);
}
/// Emits a call or invoke instruction to the given function, depending
@@ -2572,73 +2952,6 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
return Inst;
}
-static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
- llvm::FunctionType *FTy) {
- if (ArgNo < FTy->getNumParams())
- assert(Elt->getType() == FTy->getParamType(ArgNo));
- else
- assert(FTy->isVarArg());
- ++ArgNo;
-}
-
-void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
- SmallVectorImpl<llvm::Value *> &Args,
- llvm::FunctionType *IRFuncTy) {
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- unsigned NumElts = AT->getSize().getZExtValue();
- QualType EltTy = AT->getElementType();
- llvm::Value *Addr = RV.getAggregateAddr();
- for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
- llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
- RValue EltRV = convertTempToRValue(EltAddr, EltTy, SourceLocation());
- ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
- }
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- RecordDecl *RD = RT->getDecl();
- assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
- LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
-
- if (RD->isUnion()) {
- const FieldDecl *LargestFD = nullptr;
- CharUnits UnionSize = CharUnits::Zero();
-
- for (const auto *FD : RD->fields()) {
- assert(!FD->isBitField() &&
- "Cannot expand structure with bit-field members.");
- CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
- if (UnionSize < FieldSize) {
- UnionSize = FieldSize;
- LargestFD = FD;
- }
- }
- if (LargestFD) {
- RValue FldRV = EmitRValueForField(LV, LargestFD, SourceLocation());
- ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
- }
- } else {
- for (const auto *FD : RD->fields()) {
- RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
- ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
- }
- }
- } else if (Ty->isAnyComplexType()) {
- ComplexPairTy CV = RV.getComplexVal();
- Args.push_back(CV.first);
- Args.push_back(CV.second);
- } else {
- assert(RV.isScalar() &&
- "Unexpected non-scalar rvalue during struct expansion.");
-
- // Insert a bitcast as needed.
- llvm::Value *V = RV.getScalarVal();
- if (Args.size() < IRFuncTy->getNumParams() &&
- V->getType() != IRFuncTy->getParamType(Args.size()))
- V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
-
- Args.push_back(V);
- }
-}
-
/// \brief Store a non-aggregate value to an address to initialize it. For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
@@ -2661,15 +2974,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const Decl *TargetDecl,
llvm::Instruction **callOrInvoke) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
- SmallVector<llvm::Value*, 16> Args;
// Handle struct-return functions by passing a pointer to the
// location that we would like to return into.
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
- // IRArgNo - Keep track of the argument number in the callee we're looking at.
- unsigned IRArgNo = 0;
llvm::FunctionType *IRFuncTy =
cast<llvm::FunctionType>(
cast<llvm::PointerType>(Callee->getType())->getElementType());
@@ -2691,22 +3001,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
ArgMemory = AI;
}
+ ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
+ SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
+
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
llvm::Value *SRetPtr = nullptr;
- bool SwapThisWithSRet = false;
if (RetAI.isIndirect() || RetAI.isInAlloca()) {
SRetPtr = ReturnValue.getValue();
if (!SRetPtr)
SRetPtr = CreateMemTemp(RetTy);
- if (RetAI.isIndirect()) {
- Args.push_back(SRetPtr);
- SwapThisWithSRet = RetAI.isSRetAfterThis();
- if (SwapThisWithSRet)
- IRArgNo = 1;
- checkArgMatches(SRetPtr, IRArgNo, IRFuncTy);
- if (SwapThisWithSRet)
- IRArgNo = 0;
+ if (IRFunctionArgs.hasSRetArg()) {
+ IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
} else {
llvm::Value *Addr =
Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
@@ -2716,26 +3022,26 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(CallInfo.arg_size() == CallArgs.size() &&
"Mismatch between function signature & arguments.");
+ unsigned ArgNo = 0;
CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
- I != E; ++I, ++info_it) {
+ I != E; ++I, ++info_it, ++ArgNo) {
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->RV;
- // Skip 'sret' if it came second.
- if (IRArgNo == 1 && SwapThisWithSRet)
- ++IRArgNo;
-
CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
// Insert a padding argument to ensure proper alignment.
- if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
- Args.push_back(llvm::UndefValue::get(PaddingType));
- ++IRArgNo;
- }
+ if (IRFunctionArgs.hasPaddingArg(ArgNo))
+ IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
+ llvm::UndefValue::get(ArgInfo.getPaddingType());
+
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
switch (ArgInfo.getKind()) {
case ABIArgInfo::InAlloca: {
+ assert(NumIRArgs == 0);
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
if (RV.isAggregate()) {
// Replace the placeholder with the appropriate argument slot GEP.
@@ -2761,22 +3067,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
}
- break; // Don't increment IRArgNo!
+ break;
}
case ABIArgInfo::Indirect: {
+ assert(NumIRArgs == 1);
if (RV.isScalar() || RV.isComplex()) {
// Make a temporary alloca to pass the argument.
llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
if (ArgInfo.getIndirectAlign() > AI->getAlignment())
AI->setAlignment(ArgInfo.getIndirectAlign());
- Args.push_back(AI);
+ IRCallArgs[FirstIRArg] = AI;
- LValue argLV = MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
+ LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
-
- // Validate argument match.
- checkArgMatches(AI, IRArgNo, IRFuncTy);
} else {
// We want to avoid creating an unnecessary temporary+copy here;
// however, we need one in three cases:
@@ -2790,8 +3094,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
- const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
- IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
+ const unsigned ArgAddrSpace =
+ (FirstIRArg < IRFuncTy->getNumParams()
+ ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
+ : 0);
if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
(ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
@@ -2800,23 +3106,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
if (Align > AI->getAlignment())
AI->setAlignment(Align);
- Args.push_back(AI);
+ IRCallArgs[FirstIRArg] = AI;
EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
-
- // Validate argument match.
- checkArgMatches(AI, IRArgNo, IRFuncTy);
} else {
// Skip the extra memcpy call.
- Args.push_back(Addr);
-
- // Validate argument match.
- checkArgMatches(Addr, IRArgNo, IRFuncTy);
+ IRCallArgs[FirstIRArg] = Addr;
}
}
break;
}
case ABIArgInfo::Ignore:
+ assert(NumIRArgs == 0);
break;
case ABIArgInfo::Extend:
@@ -2824,20 +3125,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
ArgInfo.getDirectOffset() == 0) {
+ assert(NumIRArgs == 1);
llvm::Value *V;
if (RV.isScalar())
V = RV.getScalarVal();
else
V = Builder.CreateLoad(RV.getAggregateAddr());
-
+
+ // We might have to widen integers, but we should never truncate.
+ if (ArgInfo.getCoerceToType() != V->getType() &&
+ V->getType()->isIntegerTy())
+ V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
+
// If the argument doesn't match, perform a bitcast to coerce it. This
// can happen due to trivial type mismatches.
- if (IRArgNo < IRFuncTy->getNumParams() &&
- V->getType() != IRFuncTy->getParamType(IRArgNo))
- V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
- Args.push_back(V);
-
- checkArgMatches(V, IRArgNo, IRFuncTy);
+ if (FirstIRArg < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(FirstIRArg))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
+ IRCallArgs[FirstIRArg] = V;
break;
}
@@ -2859,14 +3164,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
- // If the coerce-to type is a first class aggregate, we flatten it and
- // pass the elements. Either way is semantically identical, but fast-isel
- // and the optimizer generally likes scalar values better than FCAs.
- // We cannot do this for functions using the AAPCS calling convention,
- // as structures are treated differently by that calling convention.
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
- if (STy && !isAAPCSVFP(CallInfo, getTarget())) {
+ if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -2886,38 +3188,32 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::PointerType::getUnqual(STy));
}
+ assert(NumIRArgs == STy->getNumElements());
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
// We don't know what we're loading from.
LI->setAlignment(1);
- Args.push_back(LI);
-
- // Validate argument match.
- checkArgMatches(LI, IRArgNo, IRFuncTy);
+ IRCallArgs[FirstIRArg + i] = LI;
}
} else {
// In the simple case, just pass the coerced loaded value.
- Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
- *this));
-
- // Validate argument match.
- checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
+ assert(NumIRArgs == 1);
+ IRCallArgs[FirstIRArg] =
+ CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);
}
break;
}
case ABIArgInfo::Expand:
- ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
- IRArgNo = Args.size();
+ unsigned IRArgPos = FirstIRArg;
+ ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
+ assert(IRArgPos == FirstIRArg + NumIRArgs);
break;
}
}
- if (SwapThisWithSRet)
- std::swap(Args[0], Args[1]);
-
if (ArgMemory) {
llvm::Value *Arg = ArgMemory;
if (CallInfo.isVariadic()) {
@@ -2948,7 +3244,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Arg = Builder.CreateBitCast(Arg, LastParamTy);
}
}
- Args.push_back(Arg);
+ assert(IRFunctionArgs.hasInallocaArg());
+ IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
}
if (!CallArgs.getCleanupsToDeactivate().empty())
@@ -2967,7 +3264,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (CE->getOpcode() == llvm::Instruction::BitCast &&
ActualFT->getReturnType() == CurFT->getReturnType() &&
ActualFT->getNumParams() == CurFT->getNumParams() &&
- ActualFT->getNumParams() == Args.size() &&
+ ActualFT->getNumParams() == IRCallArgs.size() &&
(CurFT->isVarArg() || !ActualFT->isVarArg())) {
bool ArgsMatch = true;
for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
@@ -2984,6 +3281,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
+ assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
+ for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
+ // Inalloca argument can have different type.
+ if (IRFunctionArgs.hasInallocaArg() &&
+ i == IRFunctionArgs.getInallocaArgNo())
+ continue;
+ if (i < IRFuncTy->getNumParams())
+ assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
+ }
+
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
@@ -2998,10 +3305,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::CallSite CS;
if (!InvokeDest) {
- CS = Builder.CreateCall(Callee, Args);
+ CS = Builder.CreateCall(Callee, IRCallArgs);
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
EmitBlock(Cont);
}
if (callOrInvoke)
@@ -3050,75 +3357,92 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// lexical order, so deactivate it and run it manually here.
CallArgs.freeArgumentMemory(*this);
- switch (RetAI.getKind()) {
- case ABIArgInfo::InAlloca:
- case ABIArgInfo::Indirect:
- return convertTempToRValue(SRetPtr, RetTy, SourceLocation());
+ RValue Ret = [&] {
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::InAlloca:
+ case ABIArgInfo::Indirect:
+ return convertTempToRValue(SRetPtr, RetTy, SourceLocation());
- case ABIArgInfo::Ignore:
- // If we are ignoring an argument that had a result, make sure to
- // construct the appropriate return value for our caller.
- return GetUndefRValue(RetTy);
+ case ABIArgInfo::Ignore:
+ // If we are ignoring an argument that had a result, make sure to
+ // construct the appropriate return value for our caller.
+ return GetUndefRValue(RetTy);
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct: {
- llvm::Type *RetIRTy = ConvertType(RetTy);
- if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
- switch (getEvaluationKind(RetTy)) {
- case TEK_Complex: {
- llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
- llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
- return RValue::getComplex(std::make_pair(Real, Imag));
- }
- case TEK_Aggregate: {
- llvm::Value *DestPtr = ReturnValue.getValue();
- bool DestIsVolatile = ReturnValue.isVolatile();
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ llvm::Type *RetIRTy = ConvertType(RetTy);
+ if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
+ switch (getEvaluationKind(RetTy)) {
+ case TEK_Complex: {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ case TEK_Aggregate: {
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
- if (!DestPtr) {
- DestPtr = CreateMemTemp(RetTy, "agg.tmp");
- DestIsVolatile = false;
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+ DestIsVolatile = false;
+ }
+ BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
+ return RValue::getAggregate(DestPtr);
}
- BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
- return RValue::getAggregate(DestPtr);
- }
- case TEK_Scalar: {
- // If the argument doesn't match, perform a bitcast to coerce it. This
- // can happen due to trivial type mismatches.
- llvm::Value *V = CI;
- if (V->getType() != RetIRTy)
- V = Builder.CreateBitCast(V, RetIRTy);
- return RValue::get(V);
+ case TEK_Scalar: {
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ llvm::Value *V = CI;
+ if (V->getType() != RetIRTy)
+ V = Builder.CreateBitCast(V, RetIRTy);
+ return RValue::get(V);
+ }
+ }
+ llvm_unreachable("bad evaluation kind");
}
+
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "coerce");
+ DestIsVolatile = false;
}
- llvm_unreachable("bad evaluation kind");
- }
- llvm::Value *DestPtr = ReturnValue.getValue();
- bool DestIsVolatile = ReturnValue.isVolatile();
+ // If the value is offset in memory, apply the offset now.
+ llvm::Value *StorePtr = DestPtr;
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
+ StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
+ StorePtr = Builder.CreateBitCast(StorePtr,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
- if (!DestPtr) {
- DestPtr = CreateMemTemp(RetTy, "coerce");
- DestIsVolatile = false;
+ return convertTempToRValue(DestPtr, RetTy, SourceLocation());
}
- // If the value is offset in memory, apply the offset now.
- llvm::Value *StorePtr = DestPtr;
- if (unsigned Offs = RetAI.getDirectOffset()) {
- StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
- StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
- StorePtr = Builder.CreateBitCast(StorePtr,
- llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
}
- CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
- return convertTempToRValue(DestPtr, RetTy, SourceLocation());
- }
+ llvm_unreachable("Unhandled ABIArgInfo::Kind");
+ } ();
- case ABIArgInfo::Expand:
- llvm_unreachable("Invalid ABI kind for return argument");
+ if (Ret.isScalar() && TargetDecl) {
+ if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
+ llvm::Value *OffsetValue = nullptr;
+ if (const auto *Offset = AA->getOffset())
+ OffsetValue = EmitScalarExpr(Offset);
+
+ llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
+ llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
+ EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
+ OffsetValue);
+ }
}
- llvm_unreachable("Unhandled ABIArgInfo::Kind");
+ return Ret;
}
/* VarArg handling */
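// [Editor's sketch] The CGCall.cpp hunks above replace the old incremental
// Args.push_back()/IRArgNo bookkeeping with a precomputed mapping that gives
// every source-level argument a fixed (FirstIRArg, NumIRArgs) slot range in a
// pre-sized IRCallArgs vector, so sret, padding, expanded and inalloca
// arguments can be written by index in any order. A minimal standalone sketch
// of that slot-mapping idea follows; the types and names are illustrative and
// are not Clang's ClangToLLVMArgMapping API.
#include <cassert>
#include <cstdio>
#include <utility>
#include <vector>

// Each source-level argument expands to zero or more IR-level arguments
// (e.g. 0 for an ignored value, 2 for a complex number, N for a flattened
// struct). Precomputing the ranges lets each producer write its slots
// directly instead of relying on emission order.
class ArgMapping {
  std::vector<std::pair<unsigned, unsigned>> Ranges; // (FirstIRArg, NumIRArgs)
  unsigned TotalIRArgs = 0;

public:
  explicit ArgMapping(const std::vector<unsigned> &IRArgsPerSourceArg) {
    for (unsigned N : IRArgsPerSourceArg) {
      Ranges.emplace_back(TotalIRArgs, N);
      TotalIRArgs += N;
    }
  }
  unsigned totalIRArgs() const { return TotalIRArgs; }
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    return Ranges[ArgNo];
  }
};

int main() {
  // Three source arguments: a scalar (1 slot), an ignored empty struct
  // (0 slots), and a struct flattened into two scalars (2 slots).
  ArgMapping Map({1, 0, 2});
  std::vector<int> IRCallArgs(Map.totalIRArgs(), 0);

  IRCallArgs[Map.getIRArgs(0).first] = 7; // the scalar argument
  auto [First, Num] = Map.getIRArgs(2);   // the flattened struct
  assert(Num == 2);
  IRCallArgs[First] = 41;
  IRCallArgs[First + 1] = 42;

  for (int V : IRCallArgs)
    std::printf("%d\n", V);
  return 0;
}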
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 9510a1cd5461..b228733fb8ce 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGCALL_H
-#define CLANG_CODEGEN_CGCALL_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCALL_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCALL_H
#include "CGValue.h"
#include "EHScopeStack.h"
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 9427de14d704..92c694a76de7 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -134,12 +134,11 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
return ptr;
}
-llvm::Value *
-CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
- const CXXRecordDecl *Derived,
- CastExpr::path_const_iterator PathBegin,
- CastExpr::path_const_iterator PathEnd,
- bool NullCheckValue) {
+llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
+ llvm::Value *Value, const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd, bool NullCheckValue,
+ SourceLocation Loc) {
assert(PathBegin != PathEnd && "Base path should not be empty!");
CastExpr::path_const_iterator Start = PathBegin;
@@ -176,9 +175,16 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
llvm::Type *BasePtrTy =
ConvertType((PathEnd[-1])->getType())->getPointerTo();
+ QualType DerivedTy = getContext().getRecordType(Derived);
+ CharUnits DerivedAlign = getContext().getTypeAlignInChars(DerivedTy);
+
// If the static offset is zero and we don't have a virtual step,
// just do a bitcast; null checks are unnecessary.
if (NonVirtualOffset.isZero() && !VBase) {
+ if (sanitizePerformTypeCheck()) {
+ EmitTypeCheck(TCK_Upcast, Loc, Value, DerivedTy, DerivedAlign,
+ !NullCheckValue);
+ }
return Builder.CreateBitCast(Value, BasePtrTy);
}
@@ -197,6 +203,11 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
EmitBlock(notNullBB);
}
+ if (sanitizePerformTypeCheck()) {
+ EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, Value,
+ DerivedTy, DerivedAlign, true);
+ }
+
// Compute the virtual offset.
llvm::Value *VirtualOffset = nullptr;
if (VBase) {
@@ -533,6 +544,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CXXCtorInitializer *MemberInit,
const CXXConstructorDecl *Constructor,
FunctionArgList &Args) {
+ ApplyDebugLocation Loc(CGF, MemberInit->getMemberLocation());
assert(MemberInit->isAnyMemberInitializer() &&
"Must have member initializer!");
assert(MemberInit->getInit() && "Must have initializer!");
@@ -569,9 +581,8 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
if (BaseElementTy.isPODType(CGF.getContext()) ||
(CE && CE->getConstructor()->isTrivial())) {
- // Find the source pointer. We know it's the last argument because
- // we know we're in an implicit copy constructor.
- unsigned SrcArgIndex = Args.size() - 1;
+ unsigned SrcArgIndex =
+ CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args);
llvm::Value *SrcPtr
= CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
@@ -587,12 +598,13 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
ArrayRef<VarDecl *> ArrayIndexes;
if (MemberInit->getNumArrayIndices())
ArrayIndexes = MemberInit->getArrayIndexes();
+ ApplyDebugLocation DL(CGF, MemberInit->getMemberLocation());
CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}
-void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
- LValue LHS, Expr *Init,
- ArrayRef<VarDecl *> ArrayIndexes) {
+void CodeGenFunction::EmitInitializerForField(
+ FieldDecl *Field, LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes) {
QualType FieldType = Field->getType();
switch (getEvaluationKind(FieldType)) {
case TEK_Scalar:
@@ -692,8 +704,74 @@ static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
return true;
}
+// Emit code in ctor (Prologue==true) or dtor (Prologue==false)
+// to poison the extra field paddings inserted under
+// -fsanitize-address-field-padding=1|2.
+void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) {
+ ASTContext &Context = getContext();
+ const CXXRecordDecl *ClassDecl =
+ Prologue ? cast<CXXConstructorDecl>(CurGD.getDecl())->getParent()
+ : cast<CXXDestructorDecl>(CurGD.getDecl())->getParent();
+ if (!ClassDecl->mayInsertExtraPadding()) return;
+
+ struct SizeAndOffset {
+ uint64_t Size;
+ uint64_t Offset;
+ };
+
+ unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits();
+ const ASTRecordLayout &Info = Context.getASTRecordLayout(ClassDecl);
+
+ // Populate sizes and offsets of fields.
+ SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount());
+ for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i)
+ SSV[i].Offset =
+ Context.toCharUnitsFromBits(Info.getFieldOffset(i)).getQuantity();
+
+ size_t NumFields = 0;
+ for (const auto *Field : ClassDecl->fields()) {
+ const FieldDecl *D = Field;
+ std::pair<CharUnits, CharUnits> FieldInfo =
+ Context.getTypeInfoInChars(D->getType());
+ CharUnits FieldSize = FieldInfo.first;
+ assert(NumFields < SSV.size());
+ SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity();
+ NumFields++;
+ }
+ assert(NumFields == SSV.size());
+ if (SSV.size() <= 1) return;
+
+ // We will insert calls to __asan_* run-time functions.
+ // LLVM AddressSanitizer pass may decide to inline them later.
+ llvm::Type *Args[2] = {IntPtrTy, IntPtrTy};
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, Args, false);
+ llvm::Constant *F = CGM.CreateRuntimeFunction(
+ FTy, Prologue ? "__asan_poison_intra_object_redzone"
+ : "__asan_unpoison_intra_object_redzone");
+
+ llvm::Value *ThisPtr = LoadCXXThis();
+ ThisPtr = Builder.CreatePtrToInt(ThisPtr, IntPtrTy);
+ uint64_t TypeSize = Info.getNonVirtualSize().getQuantity();
+ // For each field check if it has sufficient padding,
+ // if so (un)poison it with a call.
+ for (size_t i = 0; i < SSV.size(); i++) {
+ uint64_t AsanAlignment = 8;
+ uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset;
+ uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size;
+ uint64_t EndOffset = SSV[i].Offset + SSV[i].Size;
+ if (PoisonSize < AsanAlignment || !SSV[i].Size ||
+ (NextField % AsanAlignment) != 0)
+ continue;
+ Builder.CreateCall2(
+ F, Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)),
+ Builder.getIntN(PtrSize, PoisonSize));
+ }
+}
+
/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
+ EmitAsanPrologueOrEpilogue(true);
const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
CXXCtorType CtorType = CurGD.getCtorType();
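// [Editor's sketch] EmitAsanPrologueOrEpilogue above walks the field layout
// and (un)poisons the padding between the end of each field and the start of
// the next one (or the end of the object for the last field). A standalone
// illustration of that arithmetic follows; the layout, the 48-byte object
// size and the 8-byte ASan alignment are assumed values for the example, and
// the printed ranges stand in for the real __asan_* runtime calls.
#include <cstdint>
#include <cstdio>
#include <vector>

struct SizeAndOffset {
  uint64_t Size;
  uint64_t Offset;
};

int main() {
  const uint64_t AsanAlignment = 8;
  const uint64_t TypeSize = 48; // assumed non-virtual size of the class
  // Assumed field layout with extra padding inserted between the fields.
  std::vector<SizeAndOffset> Fields = {{4, 0}, {8, 16}, {4, 32}};

  for (size_t i = 0; i < Fields.size(); ++i) {
    uint64_t NextField =
        (i == Fields.size() - 1) ? TypeSize : Fields[i + 1].Offset;
    uint64_t EndOffset = Fields[i].Offset + Fields[i].Size;
    uint64_t PoisonSize = NextField - EndOffset;
    // Skip regions that are too small or whose end is not ASan-aligned,
    // mirroring the guards in the loop above.
    if (PoisonSize < AsanAlignment || Fields[i].Size == 0 ||
        (NextField % AsanAlignment) != 0)
      continue;
    std::printf("poison [%llu, %llu), size %llu\n",
                (unsigned long long)EndOffset, (unsigned long long)NextField,
                (unsigned long long)PoisonSize);
  }
  return 0;
}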
@@ -705,13 +783,13 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// delegation optimization.
if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
CGM.getTarget().getCXXABI().hasConstructorVariants()) {
- if (CGDebugInfo *DI = getDebugInfo())
- DI->EmitLocation(Builder, Ctor->getLocEnd());
EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
return;
}
- Stmt *Body = Ctor->getBody();
+ const FunctionDecl *Definition = 0;
+ Stmt *Body = Ctor->getBody(Definition);
+ assert(Definition == Ctor && "emitting wrong constructor body");
// Enter the function-try-block before the constructor prologue if
// applicable.
@@ -755,18 +833,16 @@ namespace {
class CopyingValueRepresentation {
public:
explicit CopyingValueRepresentation(CodeGenFunction &CGF)
- : CGF(CGF), SO(*CGF.SanOpts), OldSanOpts(CGF.SanOpts) {
- SO.Bool = false;
- SO.Enum = false;
- CGF.SanOpts = &SO;
+ : CGF(CGF), OldSanOpts(CGF.SanOpts) {
+ CGF.SanOpts.set(SanitizerKind::Bool, false);
+ CGF.SanOpts.set(SanitizerKind::Enum, false);
}
~CopyingValueRepresentation() {
CGF.SanOpts = OldSanOpts;
}
private:
CodeGenFunction &CGF;
- SanitizerOptions SO;
- const SanitizerOptions *OldSanOpts;
+ SanitizerSet OldSanOpts;
};
}
@@ -780,7 +856,10 @@ namespace {
FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
LastFieldOffset(0), LastAddedFieldIndex(0) {}
- static bool isMemcpyableField(FieldDecl *F) {
+ bool isMemcpyableField(FieldDecl *F) const {
+ // Never memcpy fields when we are adding poisoned paddings.
+ if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding)
+ return false;
Qualifiers Qual = F->getType().getQualifiers();
if (Qual.hasVolatile() || Qual.hasObjCLifetime())
return false;
@@ -794,13 +873,13 @@ namespace {
addNextField(F);
}
- CharUnits getMemcpySize() const {
+ CharUnits getMemcpySize(uint64_t FirstByteOffset) const {
unsigned LastFieldSize =
LastField->isBitField() ?
LastField->getBitWidthValue(CGF.getContext()) :
CGF.getContext().getTypeSize(LastField->getType());
uint64_t MemcpySizeBits =
- LastFieldOffset + LastFieldSize - FirstFieldOffset +
+ LastFieldOffset + LastFieldSize - FirstByteOffset +
CGF.getContext().getCharWidth() - 1;
CharUnits MemcpySize =
CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
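// [Editor's note] The size above is a round-up-to-whole-bytes of the bit span
// from the first copied byte to the end of the last field: adding
// CharWidth - 1 before the truncating bits-to-bytes conversion rounds up.
// A tiny worked example of that arithmetic, assuming an 8-bit char and
// made-up offsets:
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t CharWidth = 8;
  // Assumed span: the last field is a 3-bit bitfield at bit offset 40 and the
  // copy starts at byte offset 2 (bit 16).
  uint64_t LastFieldOffset = 40, LastFieldSize = 3, FirstByteOffset = 16;
  uint64_t MemcpySizeBits =
      LastFieldOffset + LastFieldSize - FirstByteOffset + CharWidth - 1;
  uint64_t MemcpyBytes = MemcpySizeBits / CharWidth; // truncating division
  assert(MemcpyBytes == 4); // 27 bits of payload round up to 4 whole bytes
  return 0;
}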
@@ -816,19 +895,31 @@ namespace {
CharUnits Alignment;
+ uint64_t FirstByteOffset;
if (FirstField->isBitField()) {
const CGRecordLayout &RL =
CGF.getTypes().getCGRecordLayout(FirstField->getParent());
const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
+ // FirstFieldOffset is not appropriate for bitfields,
+ // it won't tell us what the storage offset should be and thus might not
+ // be properly aligned.
+ //
+ // Instead calculate the storage offset using the offset of the field in
+ // the struct type.
+ const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
+ FirstByteOffset =
+ DL.getStructLayout(RL.getLLVMType())
+ ->getElementOffsetInBits(RL.getLLVMFieldNo(FirstField));
} else {
Alignment = CGF.getContext().getDeclAlign(FirstField);
+ FirstByteOffset = FirstFieldOffset;
}
- assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
+ assert((CGF.getContext().toCharUnitsFromBits(FirstByteOffset) %
Alignment) == 0 && "Bad field alignment.");
- CharUnits MemcpySize = getMemcpySize();
+ CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
@@ -912,11 +1003,12 @@ namespace {
private:
/// Get source argument for copy constructor. Returns null if not a copy
- /// constructor.
- static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
+ /// constructor.
+ static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF,
+ const CXXConstructorDecl *CD,
FunctionArgList &Args) {
if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
- return Args[Args.size() - 1];
+ return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)];
return nullptr;
}
@@ -947,7 +1039,7 @@ namespace {
public:
ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
FunctionArgList &Args)
- : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
+ : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CGF, CD, Args)),
ConstructorDecl(CD),
MemcpyableCtor(CD->isDefaulted() &&
CD->isCopyOrMoveConstructor() &&
@@ -1279,6 +1371,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
bool isTryBody = (Body && isa<CXXTryStmt>(Body));
if (isTryBody)
EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+ EmitAsanPrologueOrEpilogue(false);
// Enter the epilogue cleanups.
RunCleanupsScope DtorEpilogue(*this);
@@ -1289,6 +1382,9 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// we'd introduce *two* handler blocks. In the Microsoft ABI, we
// always delegate because we might not have a definition in this TU.
switch (DtorType) {
+ case Dtor_Comdat:
+ llvm_unreachable("not expecting a COMDAT");
+
case Dtor_Deleting: llvm_unreachable("already handled deleting case");
case Dtor_Complete:
@@ -1515,19 +1611,14 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
/// zero-initialized before it is constructed
-void
-CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
- const ConstantArrayType *arrayType,
- llvm::Value *arrayBegin,
- CallExpr::const_arg_iterator argBegin,
- CallExpr::const_arg_iterator argEnd,
- bool zeroInitialize) {
+void CodeGenFunction::EmitCXXAggrConstructorCall(
+ const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType,
+ llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
QualType elementType;
llvm::Value *numElements =
emitArrayLength(arrayType, elementType, arrayBegin);
- EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
- argBegin, argEnd, zeroInitialize);
+ EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize);
}
/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
@@ -1539,13 +1630,11 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
/// zero-initialized before it is constructed
-void
-CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
- llvm::Value *numElements,
- llvm::Value *arrayBegin,
- CallExpr::const_arg_iterator argBegin,
- CallExpr::const_arg_iterator argEnd,
- bool zeroInitialize) {
+void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ llvm::Value *numElements,
+ llvm::Value *arrayBegin,
+ const CXXConstructExpr *E,
+ bool zeroInitialize) {
// It's legal for numElements to be zero. This can happen both
// dynamically, because x can be zero in 'new A[x]', and statically,
@@ -1608,8 +1697,8 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
}
- EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
- /*Delegating=*/false, cur, argBegin, argEnd);
+ EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
+ /*Delegating=*/false, cur, E);
}
// Go to the next element.
@@ -1640,29 +1729,27 @@ void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
/*Delegating=*/false, addr);
}
-void
-CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
- CXXCtorType Type, bool ForVirtualBase,
- bool Delegating,
- llvm::Value *This,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
+void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ bool ForVirtualBase,
+ bool Delegating, llvm::Value *This,
+ const CXXConstructExpr *E) {
// If this is a trivial constructor, just emit what's needed.
- if (D->isTrivial()) {
- if (ArgBeg == ArgEnd) {
+ if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) {
+ if (E->getNumArgs() == 0) {
// Trivial default constructor, no codegen required.
assert(D->isDefaultConstructor() &&
"trivial 0-arg ctor not a default ctor");
return;
}
- assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
assert(D->isCopyOrMoveConstructor() &&
"trivial 1-arg ctor not a copy/move ctor");
- const Expr *E = (*ArgBeg);
- QualType Ty = E->getType();
- llvm::Value *Src = EmitLValue(E).getAddress();
+ const Expr *Arg = E->getArg(0);
+ QualType Ty = Arg->getType();
+ llvm::Value *Src = EmitLValue(Arg).getAddress();
EmitAggregateCopy(This, Src, Ty);
return;
}
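// [Editor's sketch] The fast path above skips the constructor call entirely
// when the constructor is trivial (and no ASan field padding was inserted):
// a trivial default constructor emits nothing, and a trivial copy/move
// constructor lowers to a plain aggregate copy. A standalone illustration of
// why a raw memory copy is a valid stand-in for a trivial copy constructor:
#include <cassert>
#include <cstring>
#include <type_traits>

struct Point { int x, y; }; // trivial constructors, trivially copyable

int main() {
  static_assert(std::is_trivially_copyable<Point>::value,
                "a byte-wise copy is equivalent to the copy constructor");
  Point Src{3, 4};
  Point Dst;
  std::memcpy(&Dst, &Src, sizeof(Point)); // stands in for Point Dst(Src)
  assert(Dst.x == 3 && Dst.y == 4);
  return 0;
}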
@@ -1681,14 +1768,14 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
// Add the rest of the user-supplied arguments.
const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
- EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+ EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getConstructor());
// Insert any ABI-specific implicit constructor arguments.
unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
*this, D, Type, ForVirtualBase, Delegating, Args);
// Emit the call.
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+ llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
const CGFunctionInfo &Info =
CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
@@ -1697,16 +1784,16 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
llvm::Value *This, llvm::Value *Src,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
- if (D->isTrivial()) {
- assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ const CXXConstructExpr *E) {
+ if (D->isTrivial() &&
+ !D->getParent()->mayInsertExtraPadding()) {
+ assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
assert(D->isCopyOrMoveConstructor() &&
"trivial 1-arg ctor not a copy/move ctor");
- EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
+ EmitAggregateCopy(This, Src, E->arg_begin()->getType());
return;
}
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
+ llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete);
assert(D->isInstance() &&
"Trying to emit a member call expr on a static method!");
@@ -1724,8 +1811,8 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
Args.add(RValue::get(Src), QT);
// Skip over first argument (Src).
- EmitCallArgs(Args, FPT->isVariadic(), FPT->param_type_begin() + 1,
- FPT->param_type_end(), ArgBeg + 1, ArgEnd);
+ EmitCallArgs(Args, FPT, E->arg_begin() + 1, E->arg_end(), E->getConstructor(),
+ /*ParamsToSkip*/ 1);
EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
Callee, ReturnValueSlot(), Args, D);
@@ -1766,8 +1853,10 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
EmitDelegateCallArg(DelegateArgs, param, Loc);
}
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
- EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
+ llvm::Value *Callee =
+ CGM.getAddrOfCXXStructor(Ctor, getFromCtorType(CtorType));
+ EmitCall(CGM.getTypes()
+ .arrangeCXXStructorDeclaration(Ctor, getFromCtorType(CtorType)),
Callee, ReturnValueSlot(), DelegateArgs, Ctor);
}
@@ -1894,10 +1983,14 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
NonVirtualOffset,
VirtualOffset);
- // Finally, store the address point.
- llvm::Type *AddressPointPtrTy =
- VTableAddressPoint->getType()->getPointerTo();
- VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
+ // Finally, store the address point. Use the same LLVM types as the field to
+ // support optimization.
+ llvm::Type *VTablePtrTy =
+ llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
+ ->getPointerTo()
+ ->getPointerTo();
+ VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo());
+ VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}
@@ -1934,7 +2027,7 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
if (I.isVirtual()) {
// Check if we've visited this virtual base before.
- if (!VBases.insert(BaseDecl))
+ if (!VBases.insert(BaseDecl).second)
continue;
const ASTRecordLayout &Layout =
@@ -2075,20 +2168,6 @@ CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
return false;
}
-llvm::Value *
-CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
- const CXXMethodDecl *MD,
- llvm::Value *This) {
- llvm::FunctionType *fnType =
- CGM.getTypes().GetFunctionType(
- CGM.getTypes().arrangeCXXMethodDeclaration(MD));
-
- if (MD->isVirtual() && !CanDevirtualizeMemberFunctionCall(E->getArg(0), MD))
- return CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, fnType);
-
- return CGM.GetAddrOfFunction(MD, fnType);
-}
-
void CodeGenFunction::EmitForwardingCallToLambda(
const CXXMethodDecl *callOperator,
CallArgList &callArgs) {
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index ed9f96df7987..18ed3e543d20 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -184,7 +184,7 @@ void EHScopeStack::popCleanup() {
StartOfData += Cleanup.getAllocatedSize();
// Destroy the cleanup.
- Cleanup.~EHCleanupScope();
+ Cleanup.Destroy();
// Check whether we can shrink the branch-fixups stack.
if (!BranchFixups.empty()) {
@@ -301,7 +301,8 @@ static void ResolveAllBranchFixups(CodeGenFunction &CGF,
}
// Don't add this case to the switch statement twice.
- if (!CasesAdded.insert(Fixup.Destination)) continue;
+ if (!CasesAdded.insert(Fixup.Destination).second)
+ continue;
Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
Fixup.Destination);
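// [Editor's note] Several hunks in this import (here, in CGClass.cpp and in
// CGCleanup.h) adapt to SmallPtrSet::insert() returning a
// std::pair<iterator, bool> rather than a bare bool, so "was this newly
// inserted?" is now spelled insert(X).second. A tiny sketch of the idiom
// using std::set, which has the same return shape:
#include <cassert>
#include <set>

int main() {
  std::set<int> Visited;
  assert(Visited.insert(7).second);  // first insertion: newly added
  assert(!Visited.insert(7).second); // duplicate: .second is false
  return 0;
}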
@@ -357,7 +358,7 @@ void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
continue;
// Don't process the same optimistic branch block twice.
- if (!ModifiedOptimisticBlocks.insert(BranchBB))
+ if (!ModifiedOptimisticBlocks.insert(BranchBB).second)
continue;
llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
@@ -860,10 +861,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// Emit the EH cleanup if required.
if (RequiresEHCleanup) {
- CGDebugInfo *DI = getDebugInfo();
- SaveAndRestoreLocation AutoRestoreLocation(*this, Builder);
- if (DI)
- DI->EmitLocation(Builder, CurEHLocation);
+ ApplyDebugLocation AutoRestoreLocation(*this, CurEHLocation);
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
index 1d4606f13669..dd156c696ad3 100644
--- a/lib/CodeGen/CGCleanup.h
+++ b/lib/CodeGen/CGCleanup.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGCLEANUP_H
-#define CLANG_CODEGEN_CGCLEANUP_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCLEANUP_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCLEANUP_H
#include "EHScopeStack.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -280,9 +280,11 @@ public:
assert(CleanupBits.CleanupSize == cleanupSize && "cleanup size overflow");
}
- ~EHCleanupScope() {
+ void Destroy() {
delete ExtInfo;
}
+ // Objects of EHCleanupScope are not destructed. Use Destroy().
+ ~EHCleanupScope() LLVM_DELETED_FUNCTION;
bool isNormalCleanup() const { return CleanupBits.IsNormalCleanup; }
llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
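// [Editor's sketch] The hunk above deletes EHCleanupScope's destructor and
// routes teardown through an explicit Destroy(), because the scopes live in a
// hand-rolled byte stack and are popped by rewinding a pointer rather than by
// normal destruction. A standalone sketch of that pattern with hypothetical
// names follows; it is illustrative only, not the EHScopeStack implementation.
#include <new>
#include <vector>

class PooledScope {
  std::vector<int> *ExtInfo = nullptr; // lazily allocated side data

public:
  PooledScope() = default;
  // Teardown is explicit: the owner pops the scope by rewinding its buffer,
  // so the destructor must never be relied upon. Deleting it enforces that
  // at compile time.
  void Destroy() { delete ExtInfo; }
  ~PooledScope() = delete;

  void addInfo(int V) {
    if (!ExtInfo)
      ExtInfo = new std::vector<int>();
    ExtInfo->push_back(V);
  }
};

int main() {
  alignas(PooledScope) unsigned char Buffer[sizeof(PooledScope)];
  auto *Scope = new (Buffer) PooledScope(); // placement-new into the "stack"
  Scope->addInfo(42);
  Scope->Destroy(); // explicit teardown; the raw buffer is simply reused
  return 0;
}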
@@ -341,7 +343,7 @@ public:
void addBranchAfter(llvm::ConstantInt *Index,
llvm::BasicBlock *Block) {
struct ExtInfo &ExtInfo = getExtInfo();
- if (ExtInfo.Branches.insert(Block))
+ if (ExtInfo.Branches.insert(Block).second)
ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
}
@@ -376,7 +378,7 @@ public:
///
/// \return true if the branch-through was new to this scope
bool addBranchThrough(llvm::BasicBlock *Block) {
- return getExtInfo().Branches.insert(Block);
+ return getExtInfo().Branches.insert(Block).second;
}
/// Determines if this cleanup scope has any branch throughs.
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 048c8f8f3674..978e1bb5b81f 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -52,64 +52,59 @@ CGDebugInfo::~CGDebugInfo() {
"Region stack mismatch, stack not empty!");
}
-SaveAndRestoreLocation::SaveAndRestoreLocation(CodeGenFunction &CGF,
- CGBuilderTy &B)
- : DI(CGF.getDebugInfo()), Builder(B) {
- if (DI) {
- SavedLoc = DI->getLocation();
- DI->CurLoc = SourceLocation();
+ArtificialLocation::ArtificialLocation(CodeGenFunction &CGF)
+ : ApplyDebugLocation(CGF) {
+ if (auto *DI = CGF.getDebugInfo()) {
+ // Construct a location that has a valid scope, but no line info.
+ assert(!DI->LexicalBlockStack.empty());
+ llvm::DIDescriptor Scope(DI->LexicalBlockStack.back());
+ CGF.Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(0, 0, Scope));
}
}
-SaveAndRestoreLocation::~SaveAndRestoreLocation() {
- if (DI)
- DI->EmitLocation(Builder, SavedLoc);
-}
-
-NoLocation::NoLocation(CodeGenFunction &CGF, CGBuilderTy &B)
- : SaveAndRestoreLocation(CGF, B) {
- if (DI)
- Builder.SetCurrentDebugLocation(llvm::DebugLoc());
-}
-
-NoLocation::~NoLocation() {
- if (DI)
- assert(Builder.getCurrentDebugLocation().isUnknown());
-}
-
-ArtificialLocation::ArtificialLocation(CodeGenFunction &CGF, CGBuilderTy &B)
- : SaveAndRestoreLocation(CGF, B) {
- if (DI)
- Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF,
+ SourceLocation TemporaryLocation,
+ bool ForceColumnInfo)
+ : CGF(CGF) {
+ if (auto *DI = CGF.getDebugInfo()) {
+ OriginalLocation = CGF.Builder.getCurrentDebugLocation();
+ if (TemporaryLocation.isInvalid())
+ CGF.Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ else
+ DI->EmitLocation(CGF.Builder, TemporaryLocation, ForceColumnInfo);
+ }
}
-void ArtificialLocation::Emit() {
- if (DI) {
- // Sync the Builder.
- DI->EmitLocation(Builder, SavedLoc);
- DI->CurLoc = SourceLocation();
- // Construct a location that has a valid scope, but no line info.
- assert(!DI->LexicalBlockStack.empty());
- llvm::DIDescriptor Scope(DI->LexicalBlockStack.back());
- Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(0, 0, Scope));
+ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc)
+ : CGF(CGF) {
+ if (CGF.getDebugInfo()) {
+ OriginalLocation = CGF.Builder.getCurrentDebugLocation();
+ if (!Loc.isUnknown())
+ CGF.Builder.SetCurrentDebugLocation(Loc);
}
}
-ArtificialLocation::~ArtificialLocation() {
- if (DI)
- assert(Builder.getCurrentDebugLocation().getLine() == 0);
+ApplyDebugLocation::~ApplyDebugLocation() {
+ // Query CGF so the location isn't overwritten when location updates are
+ // temporarily disabled (for C++ default function arguments)
+ if (CGF.getDebugInfo())
+ CGF.Builder.SetCurrentDebugLocation(OriginalLocation);
}
+/// ArtificialLocation - An RAII object that temporarily switches to
+/// an artificial debug location that has a valid scope, but no line
void CGDebugInfo::setLocation(SourceLocation Loc) {
// If the new location isn't valid return.
- if (Loc.isInvalid()) return;
+ if (Loc.isInvalid())
+ return;
CurLoc = CGM.getContext().getSourceManager().getExpansionLoc(Loc);
// If we've changed files in the middle of a lexical scope go ahead
// and create a new lexical scope with file node if it's different
// from the one in the scope.
- if (LexicalBlockStack.empty()) return;
+ if (LexicalBlockStack.empty())
+ return;
SourceManager &SM = CGM.getContext().getSourceManager();
llvm::DIScope Scope(LexicalBlockStack.back());
@@ -120,18 +115,17 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
if (Scope.isLexicalBlockFile()) {
llvm::DILexicalBlockFile LBF = llvm::DILexicalBlockFile(Scope);
- llvm::DIDescriptor D
- = DBuilder.createLexicalBlockFile(LBF.getScope(),
- getOrCreateFile(CurLoc));
+ llvm::DIDescriptor D = DBuilder.createLexicalBlockFile(
+ LBF.getScope(), getOrCreateFile(CurLoc));
llvm::MDNode *N = D;
LexicalBlockStack.pop_back();
- LexicalBlockStack.push_back(N);
+ LexicalBlockStack.emplace_back(N);
} else if (Scope.isLexicalBlock() || Scope.isSubprogram()) {
- llvm::DIDescriptor D
- = DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc));
+ llvm::DIDescriptor D =
+ DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc));
llvm::MDNode *N = D;
LexicalBlockStack.pop_back();
- LexicalBlockStack.push_back(N);
+ LexicalBlockStack.emplace_back(N);
}
}
@@ -140,10 +134,9 @@ llvm::DIScope CGDebugInfo::getContextDescriptor(const Decl *Context) {
if (!Context)
return TheCU;
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
- I = RegionMap.find(Context);
+ auto I = RegionMap.find(Context);
if (I != RegionMap.end()) {
- llvm::Value *V = I->second;
+ llvm::Metadata *V = I->second;
return llvm::DIScope(dyn_cast_or_null<llvm::MDNode>(V));
}
@@ -154,7 +147,7 @@ llvm::DIScope CGDebugInfo::getContextDescriptor(const Decl *Context) {
if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context))
if (!RDecl->isDependentType())
return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
- getOrCreateMainFile());
+ getOrCreateMainFile());
return TheCU;
}
@@ -162,10 +155,10 @@ llvm::DIScope CGDebugInfo::getContextDescriptor(const Decl *Context) {
/// name is constructed on demand (e.g. C++ destructor) then the name
/// is stored on the side.
StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
- assert (FD && "Invalid FunctionDecl!");
+ assert(FD && "Invalid FunctionDecl!");
IdentifierInfo *FII = FD->getIdentifier();
- FunctionTemplateSpecializationInfo *Info
- = FD->getTemplateSpecializationInfo();
+ FunctionTemplateSpecializationInfo *Info =
+ FD->getTemplateSpecializationInfo();
if (!Info && FII)
return FII->getName();
@@ -194,20 +187,20 @@ StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
const DeclContext *DC = OMD->getDeclContext();
if (const ObjCImplementationDecl *OID =
- dyn_cast<const ObjCImplementationDecl>(DC)) {
- OS << OID->getName();
+ dyn_cast<const ObjCImplementationDecl>(DC)) {
+ OS << OID->getName();
} else if (const ObjCInterfaceDecl *OID =
- dyn_cast<const ObjCInterfaceDecl>(DC)) {
- OS << OID->getName();
+ dyn_cast<const ObjCInterfaceDecl>(DC)) {
+ OS << OID->getName();
} else if (const ObjCCategoryImplDecl *OCD =
- dyn_cast<const ObjCCategoryImplDecl>(DC)){
- OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
- OCD->getIdentifier()->getNameStart() << ')';
+ dyn_cast<const ObjCCategoryImplDecl>(DC)) {
+ OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '('
+ << OCD->getIdentifier()->getNameStart() << ')';
} else if (isa<ObjCProtocolDecl>(DC)) {
// We can extract the type of the class from the self pointer.
- if (ImplicitParamDecl* SelfDecl = OMD->getSelfDecl()) {
+ if (ImplicitParamDecl *SelfDecl = OMD->getSelfDecl()) {
QualType ClassTy =
- cast<ObjCObjectPointerType>(SelfDecl->getType())->getPointeeType();
+ cast<ObjCObjectPointerType>(SelfDecl->getType())->getPointeeType();
ClassTy.print(OS, PrintingPolicy(LangOptions()));
}
}
@@ -223,8 +216,7 @@ StringRef CGDebugInfo::getSelectorName(Selector S) {
}
/// getClassName - Get class name including template argument list.
-StringRef
-CGDebugInfo::getClassName(const RecordDecl *RD) {
+StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
// quick optimization to avoid having to intern strings that are already
// stored reliably elsewhere
if (!isa<ClassTemplateSpecializationDecl>(RD))
@@ -256,18 +248,17 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
// Cache the results.
const char *fname = PLoc.getFilename();
- llvm::DenseMap<const char *, llvm::WeakVH>::iterator it =
- DIFileCache.find(fname);
+ auto it = DIFileCache.find(fname);
if (it != DIFileCache.end()) {
// Verify that the information still exists.
- if (llvm::Value *V = it->second)
+ if (llvm::Metadata *V = it->second)
return llvm::DIFile(cast<llvm::MDNode>(V));
}
llvm::DIFile F = DBuilder.createFile(PLoc.getFilename(), getCurrentDirname());
- DIFileCache[fname] = F;
+ DIFileCache[fname].reset(F);
return F;
}
@@ -283,7 +274,7 @@ unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
- return PLoc.isValid()? PLoc.getLine() : 0;
+ return PLoc.isValid() ? PLoc.getLine() : 0;
}
/// getColumnNumber - Get column number for the location.
@@ -297,7 +288,7 @@ unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc, bool Force) {
return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
- return PLoc.isValid()? PLoc.getColumn() : 0;
+ return PLoc.isValid() ? PLoc.getColumn() : 0;
}
StringRef CGDebugInfo::getCurrentDirname() {
@@ -388,8 +379,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
StringRef BTName;
switch (BT->getKind()) {
#define BUILTIN_TYPE(Id, SingletonId)
-#define PLACEHOLDER_TYPE(Id, SingletonId) \
- case BuiltinType::Id:
+#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
case BuiltinType::Dependent:
llvm_unreachable("Unexpected builtin type");
@@ -425,8 +415,10 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
DBuilder.createStructType(TheCU, "objc_object", getOrCreateMainFile(),
0, 0, 0, 0, llvm::DIType(), llvm::DIArray());
- ObjTy.setTypeArray(DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
- ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0, 0, ISATy)));
+ DBuilder.replaceArrays(
+ ObjTy,
+ DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
+ ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0, 0, ISATy)));
return ObjTy;
}
case BuiltinType::ObjCSel: {
@@ -438,8 +430,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
}
case BuiltinType::OCLImage1d:
- return getOrCreateStructPtrType("opencl_image1d_t",
- OCLImage1dDITy);
+ return getOrCreateStructPtrType("opencl_image1d_t", OCLImage1dDITy);
case BuiltinType::OCLImage1dArray:
return getOrCreateStructPtrType("opencl_image1d_array_t",
OCLImage1dArrayDITy);
@@ -447,53 +438,71 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
return getOrCreateStructPtrType("opencl_image1d_buffer_t",
OCLImage1dBufferDITy);
case BuiltinType::OCLImage2d:
- return getOrCreateStructPtrType("opencl_image2d_t",
- OCLImage2dDITy);
+ return getOrCreateStructPtrType("opencl_image2d_t", OCLImage2dDITy);
case BuiltinType::OCLImage2dArray:
return getOrCreateStructPtrType("opencl_image2d_array_t",
OCLImage2dArrayDITy);
case BuiltinType::OCLImage3d:
- return getOrCreateStructPtrType("opencl_image3d_t",
- OCLImage3dDITy);
+ return getOrCreateStructPtrType("opencl_image3d_t", OCLImage3dDITy);
case BuiltinType::OCLSampler:
- return DBuilder.createBasicType("opencl_sampler_t",
- CGM.getContext().getTypeSize(BT),
- CGM.getContext().getTypeAlign(BT),
- llvm::dwarf::DW_ATE_unsigned);
+ return DBuilder.createBasicType(
+ "opencl_sampler_t", CGM.getContext().getTypeSize(BT),
+ CGM.getContext().getTypeAlign(BT), llvm::dwarf::DW_ATE_unsigned);
case BuiltinType::OCLEvent:
- return getOrCreateStructPtrType("opencl_event_t",
- OCLEventDITy);
+ return getOrCreateStructPtrType("opencl_event_t", OCLEventDITy);
case BuiltinType::UChar:
- case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
+ case BuiltinType::Char_U:
+ Encoding = llvm::dwarf::DW_ATE_unsigned_char;
+ break;
case BuiltinType::Char_S:
- case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
+ case BuiltinType::SChar:
+ Encoding = llvm::dwarf::DW_ATE_signed_char;
+ break;
case BuiltinType::Char16:
- case BuiltinType::Char32: Encoding = llvm::dwarf::DW_ATE_UTF; break;
+ case BuiltinType::Char32:
+ Encoding = llvm::dwarf::DW_ATE_UTF;
+ break;
case BuiltinType::UShort:
case BuiltinType::UInt:
case BuiltinType::UInt128:
case BuiltinType::ULong:
case BuiltinType::WChar_U:
- case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
+ case BuiltinType::ULongLong:
+ Encoding = llvm::dwarf::DW_ATE_unsigned;
+ break;
case BuiltinType::Short:
case BuiltinType::Int:
case BuiltinType::Int128:
case BuiltinType::Long:
case BuiltinType::WChar_S:
- case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
- case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
+ case BuiltinType::LongLong:
+ Encoding = llvm::dwarf::DW_ATE_signed;
+ break;
+ case BuiltinType::Bool:
+ Encoding = llvm::dwarf::DW_ATE_boolean;
+ break;
case BuiltinType::Half:
case BuiltinType::Float:
case BuiltinType::LongDouble:
- case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
+ case BuiltinType::Double:
+ Encoding = llvm::dwarf::DW_ATE_float;
+ break;
}
switch (BT->getKind()) {
- case BuiltinType::Long: BTName = "long int"; break;
- case BuiltinType::LongLong: BTName = "long long int"; break;
- case BuiltinType::ULong: BTName = "long unsigned int"; break;
- case BuiltinType::ULongLong: BTName = "long long unsigned int"; break;
+ case BuiltinType::Long:
+ BTName = "long int";
+ break;
+ case BuiltinType::LongLong:
+ BTName = "long long int";
+ break;
+ case BuiltinType::ULong:
+ BTName = "long unsigned int";
+ break;
+ case BuiltinType::ULongLong:
+ BTName = "long long unsigned int";
+ break;
default:
BTName = BT->getName(CGM.getLangOpts());
break;
@@ -501,8 +510,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(BT);
uint64_t Align = CGM.getContext().getTypeAlign(BT);
- llvm::DIType DbgTy =
- DBuilder.createBasicType(BTName, Size, Align, Encoding);
+ llvm::DIType DbgTy = DBuilder.createBasicType(BTName, Size, Align, Encoding);
return DbgTy;
}
@@ -515,7 +523,7 @@ llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty) {
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
llvm::DIType DbgTy =
- DBuilder.createBasicType("complex", Size, Align, Encoding);
+ DBuilder.createBasicType("complex", Size, Align, Encoding);
return DbgTy;
}
@@ -564,25 +572,23 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
// whereas 'id<protocol>' is treated as an ObjCPointerType. For the
// debug info, we want to emit 'id' in both cases.
if (Ty->isObjCQualifiedIdType())
- return getOrCreateType(CGM.getContext().getObjCIdType(), Unit);
+ return getOrCreateType(CGM.getContext().getObjCIdType(), Unit);
- llvm::DIType DbgTy =
- CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
- Ty->getPointeeType(), Unit);
+ llvm::DIType DbgTy = CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type,
+ Ty, Ty->getPointeeType(), Unit);
return DbgTy;
}
-llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty, llvm::DIFile Unit) {
return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
Ty->getPointeeType(), Unit);
}
/// In C++ mode, types have linkage, so we can rely on the ODR and
/// on their mangled names, if they're external.
-static SmallString<256>
-getUniqueTagTypeName(const TagType *Ty, CodeGenModule &CGM,
- llvm::DICompileUnit TheCU) {
+static SmallString<256> getUniqueTagTypeName(const TagType *Ty,
+ CodeGenModule &CGM,
+ llvm::DICompileUnit TheCU) {
SmallString<256> FullName;
// FIXME: ODR should apply to ObjC++ exactly the same wasy it does to C++.
// For now, only apply ODR with C++.
@@ -627,7 +633,9 @@ CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
llvm::DICompositeType RetTy = DBuilder.createReplaceableForwardDecl(
Tag, RDName, Ctx, DefUnit, Line, 0, 0, 0, FullName);
- ReplaceMap.push_back(std::make_pair(Ty, static_cast<llvm::Value *>(RetTy)));
+ ReplaceMap.emplace_back(
+ std::piecewise_construct, std::make_tuple(Ty),
+ std::make_tuple(static_cast<llvm::Metadata *>(RetTy)));
return RetTy;
}
@@ -666,7 +674,7 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
if (BlockLiteralGeneric)
return BlockLiteralGeneric;
- SmallVector<llvm::Value *, 8> EltTys;
+ SmallVector<llvm::Metadata *, 8> EltTys;
llvm::DIType FieldTy;
QualType FType;
uint64_t FieldSize, FieldOffset;
@@ -685,9 +693,9 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
unsigned Flags = llvm::DIDescriptor::FlagAppleBlock;
unsigned LineNo = getLineNumber(CurLoc);
- EltTy = DBuilder.createStructType(Unit, "__block_descriptor",
- Unit, LineNo, FieldOffset, 0,
- Flags, llvm::DIType(), Elements);
+ EltTy = DBuilder.createStructType(Unit, "__block_descriptor", Unit, LineNo,
+ FieldOffset, 0, Flags, llvm::DIType(),
+ Elements);
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(Ty);
@@ -700,50 +708,52 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
FType = CGM.getContext().IntTy;
EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset));
- FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ FType = CGM.getContext().getPointerType(Ty->getPointeeType());
EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset));
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
FieldTy = DescTy;
FieldSize = CGM.getContext().getTypeSize(Ty);
FieldAlign = CGM.getContext().getTypeAlign(Ty);
- FieldTy = DBuilder.createMemberType(Unit, "__descriptor", Unit,
- LineNo, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
+ FieldTy =
+ DBuilder.createMemberType(Unit, "__descriptor", Unit, LineNo, FieldSize,
+ FieldAlign, FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
Elements = DBuilder.getOrCreateArray(EltTys);
- EltTy = DBuilder.createStructType(Unit, "__block_literal_generic",
- Unit, LineNo, FieldOffset, 0,
- Flags, llvm::DIType(), Elements);
+ EltTy = DBuilder.createStructType(Unit, "__block_literal_generic", Unit,
+ LineNo, FieldOffset, 0, Flags,
+ llvm::DIType(), Elements);
BlockLiteralGeneric = DBuilder.createPointerType(EltTy, Size);
return BlockLiteralGeneric;
}
-llvm::DIType CGDebugInfo::CreateType(const TemplateSpecializationType *Ty, llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
+ llvm::DIFile Unit) {
assert(Ty->isTypeAlias());
llvm::DIType Src = getOrCreateType(Ty->getAliasedType(), Unit);
SmallString<128> NS;
llvm::raw_svector_ostream OS(NS);
- Ty->getTemplateName().print(OS, CGM.getContext().getPrintingPolicy(), /*qualified*/ false);
+ Ty->getTemplateName().print(OS, CGM.getContext().getPrintingPolicy(),
+ /*qualified*/ false);
TemplateSpecializationType::PrintTemplateArgumentList(
OS, Ty->getArgs(), Ty->getNumArgs(),
CGM.getContext().getPrintingPolicy());
- TypeAliasDecl *AliasDecl =
- cast<TypeAliasTemplateDecl>(Ty->getTemplateName().getAsTemplateDecl())
- ->getTemplatedDecl();
+ TypeAliasDecl *AliasDecl = cast<TypeAliasTemplateDecl>(
+ Ty->getTemplateName().getAsTemplateDecl())->getTemplatedDecl();
SourceLocation Loc = AliasDecl->getLocation();
llvm::DIFile File = getOrCreateFile(Loc);
unsigned Line = getLineNumber(Loc);
- llvm::DIDescriptor Ctxt = getContextDescriptor(cast<Decl>(AliasDecl->getDeclContext()));
+ llvm::DIDescriptor Ctxt =
+ getContextDescriptor(cast<Decl>(AliasDecl->getDeclContext()));
return DBuilder.createTypedef(Src, internString(OS.str()), File, Line, Ctxt);
}
@@ -760,15 +770,15 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty, llvm::DIFile Unit) {
const TypedefNameDecl *TyDecl = Ty->getDecl();
llvm::DIDescriptor TypedefContext =
- getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()));
+ getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()));
- return
- DBuilder.createTypedef(Src, TyDecl->getName(), File, Line, TypedefContext);
+ return DBuilder.createTypedef(Src, TyDecl->getName(), File, Line,
+ TypedefContext);
}
llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
llvm::DIFile Unit) {
- SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Metadata *, 16> EltTys;
// Add the result type at least.
EltTys.push_back(getOrCreateType(Ty->getReturnType(), Unit));
@@ -784,49 +794,66 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
EltTys.push_back(DBuilder.createUnspecifiedParameter());
}
- llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(EltTys);
+ llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
return DBuilder.createSubroutineType(Unit, EltTypeArray);
}
+/// Convert an AccessSpecifier into the corresponding DIDescriptor flag.
+/// As an optimization, return 0 if the access specifier equals the
+/// default for the containing type.
+static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) {
+ AccessSpecifier Default = clang::AS_none;
+ if (RD && RD->isClass())
+ Default = clang::AS_private;
+ else if (RD && (RD->isStruct() || RD->isUnion()))
+ Default = clang::AS_public;
-llvm::DIType CGDebugInfo::createFieldType(StringRef name,
- QualType type,
- uint64_t sizeInBitsOverride,
- SourceLocation loc,
- AccessSpecifier AS,
- uint64_t offsetInBits,
- llvm::DIFile tunit,
- llvm::DIScope scope) {
+ if (Access == Default)
+ return 0;
+
+ switch (Access) {
+ case clang::AS_private:
+ return llvm::DIDescriptor::FlagPrivate;
+ case clang::AS_protected:
+ return llvm::DIDescriptor::FlagProtected;
+ case clang::AS_public:
+ return llvm::DIDescriptor::FlagPublic;
+ case clang::AS_none:
+ return 0;
+ }
+ llvm_unreachable("unexpected access enumerator");
+}
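// For illustration, a minimal sketch of how the default-access optimization
// above behaves (the flag getAccessFlag returns is shown on the right):
//
//   struct SPub  { int a; };   // AS_public  == default for struct -> 0
//   class  CPriv { int b;      // AS_private == default for class  -> 0
//   public:       int c; };    // AS_public  != default for class  -> FlagPublic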
+
+llvm::DIType CGDebugInfo::createFieldType(
+ StringRef name, QualType type, uint64_t sizeInBitsOverride,
+ SourceLocation loc, AccessSpecifier AS, uint64_t offsetInBits,
+ llvm::DIFile tunit, llvm::DIScope scope, const RecordDecl *RD) {
llvm::DIType debugType = getOrCreateType(type, tunit);
// Get the location for the field.
llvm::DIFile file = getOrCreateFile(loc);
unsigned line = getLineNumber(loc);
- uint64_t sizeInBits = 0;
- unsigned alignInBits = 0;
+ uint64_t SizeInBits = 0;
+ unsigned AlignInBits = 0;
if (!type->isIncompleteArrayType()) {
- std::tie(sizeInBits, alignInBits) = CGM.getContext().getTypeInfo(type);
+ TypeInfo TI = CGM.getContext().getTypeInfo(type);
+ SizeInBits = TI.Width;
+ AlignInBits = TI.Align;
if (sizeInBitsOverride)
- sizeInBits = sizeInBitsOverride;
+ SizeInBits = sizeInBitsOverride;
}
- unsigned flags = 0;
- if (AS == clang::AS_private)
- flags |= llvm::DIDescriptor::FlagPrivate;
- else if (AS == clang::AS_protected)
- flags |= llvm::DIDescriptor::FlagProtected;
-
- return DBuilder.createMemberType(scope, name, file, line, sizeInBits,
- alignInBits, offsetInBits, flags, debugType);
+ unsigned flags = getAccessFlag(AS, RD);
+ return DBuilder.createMemberType(scope, name, file, line, SizeInBits,
+ AlignInBits, offsetInBits, flags, debugType);
}
/// CollectRecordLambdaFields - Helper for CollectRecordFields.
-void CGDebugInfo::
-CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
- SmallVectorImpl<llvm::Value *> &elements,
- llvm::DIType RecordTy) {
+void CGDebugInfo::CollectRecordLambdaFields(
+ const CXXRecordDecl *CXXDecl, SmallVectorImpl<llvm::Metadata *> &elements,
+ llvm::DIType RecordTy) {
// For C++11 Lambdas a Field will be the same as a Capture, but the Capture
// has the name and the location of the variable so we should iterate over
// both concurrently.
@@ -834,7 +861,8 @@ CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
RecordDecl::field_iterator Field = CXXDecl->field_begin();
unsigned fieldno = 0;
for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(),
- E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) {
+ E = CXXDecl->captures_end();
+ I != E; ++I, ++Field, ++fieldno) {
const LambdaCapture &C = *I;
if (C.capturesVariable()) {
VarDecl *V = C.getCapturedVar();
@@ -845,23 +873,22 @@ CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
SizeInBitsOverride = Field->getBitWidthValue(CGM.getContext());
assert(SizeInBitsOverride && "found named 0-width bitfield");
}
- llvm::DIType fieldType
- = createFieldType(VName, Field->getType(), SizeInBitsOverride,
- C.getLocation(), Field->getAccess(),
- layout.getFieldOffset(fieldno), VUnit, RecordTy);
+ llvm::DIType fieldType = createFieldType(
+ VName, Field->getType(), SizeInBitsOverride, C.getLocation(),
+ Field->getAccess(), layout.getFieldOffset(fieldno), VUnit, RecordTy,
+ CXXDecl);
elements.push_back(fieldType);
- } else {
+ } else if (C.capturesThis()) {
// TODO: 'this' still needs to be handled, probably by renaming the 'this'
// member of the lambda class and adding a field member for 'this', or by
// using DW_AT_object_pointer on the function and letting that serve as
// 'this' for semantic references.
- assert(C.capturesThis() && "Field that isn't captured and isn't this?");
FieldDecl *f = *Field;
llvm::DIFile VUnit = getOrCreateFile(f->getLocation());
QualType type = f->getType();
- llvm::DIType fieldType
- = createFieldType("this", type, 0, f->getLocation(), f->getAccess(),
- layout.getFieldOffset(fieldno), VUnit, RecordTy);
+ llvm::DIType fieldType = createFieldType(
+ "this", type, 0, f->getLocation(), f->getAccess(),
+ layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl);
elements.push_back(fieldType);
}
@@ -869,11 +896,12 @@ CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
}
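// For illustration, a minimal sketch of the two capture kinds handled above:
// a by-value capture becomes a field named after the captured variable, and a
// 'this' capture becomes a field named "this".
//
//   struct W {
//     int m;
//     void f() {
//       int x = 0;
//       auto l = [this, x] { return m + x; };  // fields: "x" and "this"
//     }
//   };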
/// Helper for CollectRecordFields.
-llvm::DIDerivedType
-CGDebugInfo::CreateRecordStaticField(const VarDecl *Var,
- llvm::DIType RecordTy) {
+llvm::DIDerivedType CGDebugInfo::CreateRecordStaticField(const VarDecl *Var,
+ llvm::DIType RecordTy,
+ const RecordDecl *RD) {
// Create the descriptor for the static variable, with or without
// constant initializers.
+ Var = Var->getCanonicalDecl();
llvm::DIFile VUnit = getOrCreateFile(Var->getLocation());
llvm::DIType VTy = getOrCreateType(Var->getType(), VUnit);
@@ -890,25 +918,18 @@ CGDebugInfo::CreateRecordStaticField(const VarDecl *Var,
}
}
- unsigned Flags = 0;
- AccessSpecifier Access = Var->getAccess();
- if (Access == clang::AS_private)
- Flags |= llvm::DIDescriptor::FlagPrivate;
- else if (Access == clang::AS_protected)
- Flags |= llvm::DIDescriptor::FlagProtected;
-
+ unsigned Flags = getAccessFlag(Var->getAccess(), RD);
llvm::DIDerivedType GV = DBuilder.createStaticMemberType(
RecordTy, VName, VUnit, LineNumber, VTy, Flags, C);
- StaticDataMemberCache[Var->getCanonicalDecl()] = llvm::WeakVH(GV);
+ StaticDataMemberCache[Var->getCanonicalDecl()].reset(GV);
return GV;
}
/// CollectRecordNormalField - Helper for CollectRecordFields.
-void CGDebugInfo::
-CollectRecordNormalField(const FieldDecl *field, uint64_t OffsetInBits,
- llvm::DIFile tunit,
- SmallVectorImpl<llvm::Value *> &elements,
- llvm::DIType RecordTy) {
+void CGDebugInfo::CollectRecordNormalField(
+ const FieldDecl *field, uint64_t OffsetInBits, llvm::DIFile tunit,
+ SmallVectorImpl<llvm::Metadata *> &elements, llvm::DIType RecordTy,
+ const RecordDecl *RD) {
StringRef name = field->getName();
QualType type = field->getType();
@@ -922,20 +943,19 @@ CollectRecordNormalField(const FieldDecl *field, uint64_t OffsetInBits,
assert(SizeInBitsOverride && "found named 0-width bitfield");
}
- llvm::DIType fieldType
- = createFieldType(name, type, SizeInBitsOverride,
- field->getLocation(), field->getAccess(),
- OffsetInBits, tunit, RecordTy);
+ llvm::DIType fieldType =
+ createFieldType(name, type, SizeInBitsOverride, field->getLocation(),
+ field->getAccess(), OffsetInBits, tunit, RecordTy, RD);
elements.push_back(fieldType);
}
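// For illustration, a minimal sketch of the bit-field override above: for 'b'
// the declared width (3 bits) is passed as SizeInBitsOverride, while 'a' uses
// the full width of its type.
//
//   struct Packed {
//     unsigned a;      // member size = width of 'unsigned'
//     unsigned b : 3;  // member size = 3 bits (override)
//   };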
/// CollectRecordFields - A helper function to collect debug info for
/// record fields. This is used while creating a debug info entry for a Record.
-void CGDebugInfo::CollectRecordFields(const RecordDecl *record,
- llvm::DIFile tunit,
- SmallVectorImpl<llvm::Value *> &elements,
- llvm::DICompositeType RecordTy) {
+void CGDebugInfo::CollectRecordFields(
+ const RecordDecl *record, llvm::DIFile tunit,
+ SmallVectorImpl<llvm::Metadata *> &elements,
+ llvm::DICompositeType RecordTy) {
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record);
if (CXXDecl && CXXDecl->isLambda())
@@ -951,18 +971,19 @@ void CGDebugInfo::CollectRecordFields(const RecordDecl *record,
for (const auto *I : record->decls())
if (const auto *V = dyn_cast<VarDecl>(I)) {
// Reuse the existing static member declaration if one exists
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator MI =
- StaticDataMemberCache.find(V->getCanonicalDecl());
+ auto MI = StaticDataMemberCache.find(V->getCanonicalDecl());
if (MI != StaticDataMemberCache.end()) {
assert(MI->second &&
"Static data member declaration should still exist");
elements.push_back(
llvm::DIDerivedType(cast<llvm::MDNode>(MI->second)));
- } else
- elements.push_back(CreateRecordStaticField(V, RecordTy));
+ } else {
+ auto Field = CreateRecordStaticField(V, RecordTy, record);
+ elements.push_back(Field);
+ }
} else if (const auto *field = dyn_cast<FieldDecl>(I)) {
- CollectRecordNormalField(field, layout.getFieldOffset(fieldNo),
- tunit, elements, RecordTy);
+ CollectRecordNormalField(field, layout.getFieldOffset(fieldNo), tunit,
+ elements, RecordTy, record);
// Bump field number for next field.
++fieldNo;
@@ -986,11 +1007,11 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType(
QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile Unit) {
// Add "this" pointer.
- llvm::DIArray Args = llvm::DICompositeType(
+ llvm::DITypeArray Args = llvm::DISubroutineType(
getOrCreateType(QualType(Func, 0), Unit)).getTypeArray();
- assert (Args.getNumElements() && "Invalid number of arguments!");
+ assert(Args.getNumElements() && "Invalid number of arguments!");
- SmallVector<llvm::Value *, 16> Elts;
+ SmallVector<llvm::Metadata *, 16> Elts;
// First element is always return type. For 'void' functions it is NULL.
Elts.push_back(Args.getElement(0));
@@ -1006,8 +1027,8 @@ llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType(
uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy);
llvm::DIType PointeeType = getOrCreateType(PointeeTy, Unit);
llvm::DIType ThisPtrType =
- DBuilder.createPointerType(PointeeType, Size, Align);
- TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ DBuilder.createPointerType(PointeeType, Size, Align);
+ TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType);
// TODO: This and the artificial type below are misleading, the
// types aren't artificial the argument is, but the current
// metadata doesn't represent that.
@@ -1015,7 +1036,7 @@ llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType(
Elts.push_back(ThisPtrType);
} else {
llvm::DIType ThisPtrType = getOrCreateType(ThisPtr, Unit);
- TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType);
ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType);
Elts.push_back(ThisPtrType);
}
@@ -1024,7 +1045,7 @@ llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType(
for (unsigned i = 1, e = Args.getNumElements(); i != e; ++i)
Elts.push_back(Args.getElement(i));
- llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
+ llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
unsigned Flags = 0;
if (Func->getExtProtoInfo().RefQualifier == RQ_LValue)
@@ -1049,10 +1070,9 @@ static bool isFunctionLocalClass(const CXXRecordDecl *RD) {
/// a single member function GlobalDecl.
llvm::DISubprogram
CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
- llvm::DIFile Unit,
- llvm::DIType RecordTy) {
+ llvm::DIFile Unit, llvm::DIType RecordTy) {
bool IsCtorOrDtor =
- isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
+ isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
StringRef MethodName = getFunctionName(Method);
llvm::DICompositeType MethodTy = getOrCreateMethodType(Method, Unit);
@@ -1096,16 +1116,12 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
unsigned Flags = 0;
if (Method->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
- AccessSpecifier Access = Method->getAccess();
- if (Access == clang::AS_private)
- Flags |= llvm::DIDescriptor::FlagPrivate;
- else if (Access == clang::AS_protected)
- Flags |= llvm::DIDescriptor::FlagProtected;
+ Flags |= getAccessFlag(Method->getAccess(), Method->getParent());
if (const CXXConstructorDecl *CXXC = dyn_cast<CXXConstructorDecl>(Method)) {
if (CXXC->isExplicit())
Flags |= llvm::DIDescriptor::FlagExplicit;
} else if (const CXXConversionDecl *CXXC =
- dyn_cast<CXXConversionDecl>(Method)) {
+ dyn_cast<CXXConversionDecl>(Method)) {
if (CXXC->isExplicit())
Flags |= llvm::DIDescriptor::FlagExplicit;
}
@@ -1117,16 +1133,13 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
Flags |= llvm::DIDescriptor::FlagRValueReference;
llvm::DIArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
- llvm::DISubprogram SP =
- DBuilder.createMethod(RecordTy, MethodName, MethodLinkageName,
- MethodDefUnit, MethodLine,
- MethodTy, /*isLocalToUnit=*/false,
- /* isDefinition=*/ false,
- Virtuality, VIndex, ContainingType,
- Flags, CGM.getLangOpts().Optimize, nullptr,
- TParamsArray);
+ llvm::DISubprogram SP = DBuilder.createMethod(
+ RecordTy, MethodName, MethodLinkageName, MethodDefUnit, MethodLine,
+ MethodTy, /*isLocalToUnit=*/false,
+ /* isDefinition=*/false, Virtuality, VIndex, ContainingType, Flags,
+ CGM.getLangOpts().Optimize, nullptr, TParamsArray);
- SPCache[Method->getCanonicalDecl()] = llvm::WeakVH(SP);
+ SPCache[Method->getCanonicalDecl()].reset(SP);
return SP;
}
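// For illustration, a minimal sketch of member declarations that set the
// explicit and ref-qualifier flags collected above (implicit members
// additionally get FlagArtificial), on top of the access flag:
//
//   struct S {
//     explicit S(int);           // FlagExplicit
//     explicit operator bool();  // FlagExplicit
//     void lref() &;             // FlagLValueReference
//     void rref() &&;            // FlagRValueReference
//   };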
@@ -1134,53 +1147,49 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
/// CollectCXXMemberFunctions - A helper function to collect debug info for
/// C++ member functions. This is used while creating a debug info entry for
/// a Record.
-void CGDebugInfo::
-CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
- SmallVectorImpl<llvm::Value *> &EltTys,
- llvm::DIType RecordTy) {
+void CGDebugInfo::CollectCXXMemberFunctions(
+ const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType RecordTy) {
// Since we want more than just the individual member decls if we have
// templated functions, iterate over every declaration to gather the
// functions.
- for(const auto *I : RD->decls()) {
- if (const auto *Method = dyn_cast<CXXMethodDecl>(I)) {
- // Reuse the existing member function declaration if it exists.
- // It may be associated with the declaration of the type & should be
- // reused as we're building the definition.
- //
- // This situation can arise in the vtable-based debug info reduction where
- // implicit members are emitted in a non-vtable TU.
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator MI =
- SPCache.find(Method->getCanonicalDecl());
- if (MI == SPCache.end()) {
- // If the member is implicit, lazily create it when we see the
- // definition, not before. (an ODR-used implicit default ctor that's
- // never actually code generated should not produce debug info)
- if (!Method->isImplicit())
- EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
- } else
- EltTys.push_back(MI->second);
- } else if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(I)) {
- // Add any template specializations that have already been seen. Like
- // implicit member functions, these may have been added to a declaration
- // in the case of vtable-based debug info reduction.
- for (const auto *SI : FTD->specializations()) {
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator MI =
- SPCache.find(cast<CXXMethodDecl>(SI)->getCanonicalDecl());
- if (MI != SPCache.end())
- EltTys.push_back(MI->second);
- }
- }
+ for (const auto *I : RD->decls()) {
+ const auto *Method = dyn_cast<CXXMethodDecl>(I);
+ // If the member is implicit, don't add it to the member list. This avoids
+ // the member being added to type units by LLVM, while still allowing it
+ // to be emitted into the type declaration/reference inside the compile
+ // unit.
+ // FIXME: Handle Using(Shadow?)Decls here to create
+ // DW_TAG_imported_declaration entries inside the class for base decls
+ // brought into derived classes. GDB didn't seem to notice or use these when
+ // I tried it, so I'm not rushing to fix this. (GCC seems to produce them
+ // when they are referenced.)
+ if (!Method || Method->isImplicit())
+ continue;
+
+ if (Method->getType()->getAs<FunctionProtoType>()->getContainedAutoType())
+ continue;
+
+ // Reuse the existing member function declaration if it exists.
+ // It may be associated with the declaration of the type & should be
+ // reused as we're building the definition.
+ //
+ // This situation can arise in the vtable-based debug info reduction where
+ // implicit members are emitted in a non-vtable TU.
+ auto MI = SPCache.find(Method->getCanonicalDecl());
+ EltTys.push_back(MI == SPCache.end()
+ ? CreateCXXMemberFunction(Method, Unit, RecordTy)
+ : static_cast<llvm::Metadata *>(MI->second));
}
}
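// For illustration, a minimal sketch of what the filter above keeps and
// skips: implicit members and members with a deduced (auto) return type are
// not added to the member list here.
//
//   struct S {
//     int  normal();                // added (reusing a cached decl if present)
//     auto deduced() { return 0; }  // skipped: contained auto type
//     // implicit copy constructor: skipped
//   };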
/// CollectCXXBases - A helper function to collect debug info for
/// C++ base classes. This is used while creating debug info entry for
/// a Record.
-void CGDebugInfo::
-CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
- SmallVectorImpl<llvm::Value *> &EltTys,
- llvm::DIType RecordTy) {
+void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
+ llvm::DIType RecordTy) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
for (const auto &BI : RD->bases()) {
@@ -1188,40 +1197,40 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
uint64_t BaseOffset;
const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(BI.getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(BI.getType()->getAs<RecordType>()->getDecl());
if (BI.isVirtual()) {
- // virtual base offset offset is -ve. The code generator emits dwarf
- // expression where it expects +ve number.
- BaseOffset =
- 0 - CGM.getItaniumVTableContext()
- .getVirtualBaseOffsetOffset(RD, Base).getQuantity();
+ if (CGM.getTarget().getCXXABI().isItaniumFamily()) {
+ // The virtual base offset offset is negative, but the code generator
+ // emits a DWARF expression that expects a positive number.
+ BaseOffset = 0 - CGM.getItaniumVTableContext()
+ .getVirtualBaseOffsetOffset(RD, Base)
+ .getQuantity();
+ } else {
+ // In the MS ABI, store the vbtable offset, which is analogous to the
+ // vbase offset offset in Itanium.
+ BaseOffset =
+ 4 * CGM.getMicrosoftVTableContext().getVBTableIndex(RD, Base);
+ }
BFlags = llvm::DIDescriptor::FlagVirtual;
} else
BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base));
// FIXME: Inconsistent units for BaseOffset. It is in bytes when
// BI->isVirtual() and bits when not.
- AccessSpecifier Access = BI.getAccessSpecifier();
- if (Access == clang::AS_private)
- BFlags |= llvm::DIDescriptor::FlagPrivate;
- else if (Access == clang::AS_protected)
- BFlags |= llvm::DIDescriptor::FlagProtected;
-
- llvm::DIType DTy =
- DBuilder.createInheritance(RecordTy,
- getOrCreateType(BI.getType(), Unit),
- BaseOffset, BFlags);
+ BFlags |= getAccessFlag(BI.getAccessSpecifier(), RD);
+ llvm::DIType DTy = DBuilder.createInheritance(
+ RecordTy, getOrCreateType(BI.getType(), Unit), BaseOffset, BFlags);
EltTys.push_back(DTy);
}
}
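// For illustration, a minimal sketch of the virtual-base case handled above:
// the inheritance entry for 'Derived : virtual Base' gets FlagVirtual, and
// its offset comes from the vtable layout (negated vbase offset offset on
// Itanium, 4 * vbtable index on the MS ABI) rather than from the record
// layout.
//
//   struct Base    { int b; };
//   struct Derived : virtual Base { int d; };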
/// CollectTemplateParams - A helper function to collect template parameters.
-llvm::DIArray CGDebugInfo::
-CollectTemplateParams(const TemplateParameterList *TPList,
- ArrayRef<TemplateArgument> TAList,
- llvm::DIFile Unit) {
- SmallVector<llvm::Value *, 16> TemplateParams;
+llvm::DIArray
+CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
+ ArrayRef<TemplateArgument> TAList,
+ llvm::DIFile Unit) {
+ SmallVector<llvm::Metadata *, 16> TemplateParams;
for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
const TemplateArgument &TA = TAList[i];
StringRef Name;
@@ -1244,46 +1253,41 @@ CollectTemplateParams(const TemplateParameterList *TPList,
} break;
case TemplateArgument::Declaration: {
const ValueDecl *D = TA.getAsDecl();
- bool InstanceMember = D->isCXXInstanceMember();
- QualType T = InstanceMember
- ? CGM.getContext().getMemberPointerType(
- D->getType(), cast<RecordDecl>(D->getDeclContext())
- ->getTypeForDecl())
- : CGM.getContext().getPointerType(D->getType());
+ QualType T = TA.getParamTypeForDecl().getDesugaredType(CGM.getContext());
llvm::DIType TTy = getOrCreateType(T, Unit);
- llvm::Value *V = nullptr;
+ llvm::Constant *V = nullptr;
+ const CXXMethodDecl *MD;
// Variable pointer template parameters have a value that is the address
// of the variable.
- if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (const auto *VD = dyn_cast<VarDecl>(D))
V = CGM.GetAddrOfGlobalVar(VD);
// Member function pointers have special support for building them, though
// this is currently unsupported in LLVM CodeGen.
- if (InstanceMember) {
- if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(D))
- V = CGM.getCXXABI().EmitMemberPointer(method);
- } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ else if ((MD = dyn_cast<CXXMethodDecl>(D)) && MD->isInstance())
+ V = CGM.getCXXABI().EmitMemberPointer(MD);
+ else if (const auto *FD = dyn_cast<FunctionDecl>(D))
V = CGM.GetAddrOfFunction(FD);
// Member data pointers have special handling too to compute the fixed
// offset within the object.
- if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D)) {
+ else if (const auto *MPT = dyn_cast<MemberPointerType>(T.getTypePtr())) {
// These five lines (and possibly the member function pointer handling
// above) could probably be refactored to share code with
// CodeGenModule::getMemberPointerConstant.
uint64_t fieldOffset = CGM.getContext().getFieldOffset(D);
CharUnits chars =
- CGM.getContext().toCharUnitsFromBits((int64_t) fieldOffset);
- V = CGM.getCXXABI().EmitMemberDataPointer(
- cast<MemberPointerType>(T.getTypePtr()), chars);
+ CGM.getContext().toCharUnitsFromBits((int64_t)fieldOffset);
+ V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars);
}
llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateValueParameter(TheCU, Name, TTy,
- V->stripPointerCasts());
+ DBuilder.createTemplateValueParameter(
+ TheCU, Name, TTy,
+ cast_or_null<llvm::Constant>(V->stripPointerCasts()));
TemplateParams.push_back(TVP);
} break;
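// For illustration, a minimal sketch of declaration-valued template arguments
// that reach the case above: a variable yields its address, an instance
// method yields a member function pointer, and a data member yields a member
// data pointer (a fixed offset).
//
//   struct S { int f(); int m; };
//   int g;
//   template <int *P>        struct ByVar    {};  // ByVar<&g>
//   template <int (S::*F)()> struct ByMemFn  {};  // ByMemFn<&S::f>
//   template <int S::*D>     struct ByMemDat {};  // ByMemDat<&S::m>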
case TemplateArgument::NullPtr: {
QualType T = TA.getNullPtrType();
llvm::DIType TTy = getOrCreateType(T, Unit);
- llvm::Value *V = nullptr;
+ llvm::Constant *V = nullptr;
// Special case member data pointer null values since they're actually -1
// instead of zero.
if (const MemberPointerType *MPT =
@@ -1298,33 +1302,34 @@ CollectTemplateParams(const TemplateParameterList *TPList,
if (!V)
V = llvm::ConstantInt::get(CGM.Int8Ty, 0);
llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateValueParameter(TheCU, Name, TTy, V);
+ DBuilder.createTemplateValueParameter(TheCU, Name, TTy,
+ cast<llvm::Constant>(V));
TemplateParams.push_back(TVP);
} break;
case TemplateArgument::Template: {
- llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateTemplateParameter(
- TheCU, Name, llvm::DIType(),
- TA.getAsTemplate().getAsTemplateDecl()
- ->getQualifiedNameAsString());
+ llvm::DITemplateValueParameter
+ TVP = DBuilder.createTemplateTemplateParameter(
+ TheCU, Name, llvm::DIType(),
+ TA.getAsTemplate().getAsTemplateDecl()->getQualifiedNameAsString());
TemplateParams.push_back(TVP);
} break;
case TemplateArgument::Pack: {
- llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateParameterPack(
- TheCU, Name, llvm::DIType(),
- CollectTemplateParams(nullptr, TA.getPackAsArray(), Unit));
+ llvm::DITemplateValueParameter TVP = DBuilder.createTemplateParameterPack(
+ TheCU, Name, llvm::DIType(),
+ CollectTemplateParams(nullptr, TA.getPackAsArray(), Unit));
TemplateParams.push_back(TVP);
} break;
case TemplateArgument::Expression: {
const Expr *E = TA.getAsExpr();
QualType T = E->getType();
- llvm::Value *V = CGM.EmitConstantExpr(E, T);
+ if (E->isGLValue())
+ T = CGM.getContext().getLValueReferenceType(T);
+ llvm::Constant *V = CGM.EmitConstantExpr(E, T);
assert(V && "Expression in template argument isn't constant");
llvm::DIType TTy = getOrCreateType(T, Unit);
llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateValueParameter(TheCU, Name, TTy,
- V->stripPointerCasts());
+ DBuilder.createTemplateValueParameter(
+ TheCU, Name, TTy, cast<llvm::Constant>(V->stripPointerCasts()));
TemplateParams.push_back(TVP);
} break;
// And the following should never occur:
@@ -1339,13 +1344,13 @@ CollectTemplateParams(const TemplateParameterList *TPList,
/// CollectFunctionTemplateParams - A helper function to collect debug
/// info for function template parameters.
-llvm::DIArray CGDebugInfo::
-CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit) {
+llvm::DIArray CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD,
+ llvm::DIFile Unit) {
if (FD->getTemplatedKind() ==
FunctionDecl::TK_FunctionTemplateSpecialization) {
- const TemplateParameterList *TList =
- FD->getTemplateSpecializationInfo()->getTemplate()
- ->getTemplateParameters();
+ const TemplateParameterList *TList = FD->getTemplateSpecializationInfo()
+ ->getTemplate()
+ ->getTemplateParameters();
return CollectTemplateParams(
TList, FD->getTemplateSpecializationArgs()->asArray(), Unit);
}
@@ -1354,13 +1359,12 @@ CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit) {
/// CollectCXXTemplateParams - A helper function to collect debug info for
/// template parameters.
-llvm::DIArray CGDebugInfo::
-CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TSpecial,
- llvm::DIFile Unit) {
+llvm::DIArray CGDebugInfo::CollectCXXTemplateParams(
+ const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile Unit) {
// Always get the full list of parameters, not just the ones from
// the specialization.
TemplateParameterList *TPList =
- TSpecial->getSpecializedTemplate()->getTemplateParameters();
+ TSpecial->getSpecializedTemplate()->getTemplateParameters();
const TemplateArgumentList &TAList = TSpecial->getTemplateArgs();
return CollectTemplateParams(TPList, TAList.asArray(), Unit);
}
@@ -1373,12 +1377,12 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
ASTContext &Context = CGM.getContext();
/* Function type */
- llvm::Value *STy = getOrCreateType(Context.IntTy, Unit);
- llvm::DIArray SElements = DBuilder.getOrCreateArray(STy);
+ llvm::Metadata *STy = getOrCreateType(Context.IntTy, Unit);
+ llvm::DITypeArray SElements = DBuilder.getOrCreateTypeArray(STy);
llvm::DIType SubTy = DBuilder.createSubroutineType(Unit, SElements);
unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
- llvm::DIType vtbl_ptr_type = DBuilder.createPointerType(SubTy, Size, 0,
- "__vtbl_ptr_type");
+ llvm::DIType vtbl_ptr_type =
+ DBuilder.createPointerType(SubTy, Size, 0, "__vtbl_ptr_type");
VTablePtrType = DBuilder.createPointerType(vtbl_ptr_type, Size);
return VTablePtrType;
}
@@ -1389,12 +1393,10 @@ StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
return internString("_vptr$", RD->getNameAsString());
}
-
/// CollectVTableInfo - If the C++ class has vtable info, insert an
/// appropriate debug info entry into the EltTys vector.
-void CGDebugInfo::
-CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
- SmallVectorImpl<llvm::Value *> &EltTys) {
+void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
// If there is a primary base then it will hold vtable info.
@@ -1406,11 +1408,9 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
return;
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
- llvm::DIType VPTR
- = DBuilder.createMemberType(Unit, getVTableName(RD), Unit,
- 0, Size, 0, 0,
- llvm::DIDescriptor::FlagArtificial,
- getOrCreateVTablePtrType(Unit));
+ llvm::DIType VPTR = DBuilder.createMemberType(
+ Unit, getVTableName(RD), Unit, 0, Size, 0, 0,
+ llvm::DIDescriptor::FlagArtificial, getOrCreateVTablePtrType(Unit));
EltTys.push_back(VPTR);
}
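// For illustration, a minimal sketch of when the artificial vtable-pointer
// member above is emitted: 'A' is dynamic and has no primary base, so it gets
// a member named "_vptr$A"; 'B' uses A as its primary base and therefore adds
// no vtable-pointer member of its own.
//
//   struct A { virtual void f(); };       // gets _vptr$A
//   struct B : A { void f() override; };  // primary base already has it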
@@ -1436,15 +1436,14 @@ void CGDebugInfo::completeType(const EnumDecl *ED) {
if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
return;
QualType Ty = CGM.getContext().getEnumType(ED);
- void* TyPtr = Ty.getAsOpaquePtr();
+ void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I == TypeCache.end() ||
- !llvm::DIType(cast<llvm::MDNode>(static_cast<llvm::Value *>(I->second)))
- .isForwardDecl())
+ !llvm::DIType(cast<llvm::MDNode>(I->second)).isForwardDecl())
return;
llvm::DIType Res = CreateTypeDefinition(Ty->castAs<EnumType>());
assert(!Res.isForwardDecl());
- TypeCache[TyPtr] = Res;
+ TypeCache[TyPtr].reset(Res);
}
void CGDebugInfo::completeType(const RecordDecl *RD) {
@@ -1471,15 +1470,14 @@ void CGDebugInfo::completeClassData(const RecordDecl *RD) {
if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
return;
QualType Ty = CGM.getContext().getRecordType(RD);
- void* TyPtr = Ty.getAsOpaquePtr();
+ void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I != TypeCache.end() &&
- !llvm::DIType(cast<llvm::MDNode>(static_cast<llvm::Value *>(I->second)))
- .isForwardDecl())
+ !llvm::DIType(cast<llvm::MDNode>(I->second)).isForwardDecl())
return;
llvm::DIType Res = CreateTypeDefinition(Ty->castAs<RecordType>());
assert(!Res.isForwardDecl());
- TypeCache[TyPtr] = Res;
+ TypeCache[TyPtr].reset(Res);
}
static bool hasExplicitMemberDefinition(CXXRecordDecl::method_iterator I,
@@ -1563,11 +1561,11 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
CollectContainingType(CXXDecl, FwdDecl);
// Push the struct on region stack.
- LexicalBlockStack.push_back(&*FwdDecl);
- RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+ LexicalBlockStack.emplace_back(&*FwdDecl);
+ RegionMap[Ty->getDecl()].reset(FwdDecl);
// Convert all the elements.
- SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Metadata *, 16> EltTys;
// what about nested types?
// Note: The split of CXXDecl information here is intentional, the
@@ -1589,9 +1587,9 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
RegionMap.erase(Ty->getDecl());
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
- FwdDecl.setTypeArray(Elements);
+ DBuilder.replaceArrays(FwdDecl, Elements);
- RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+ RegionMap[Ty->getDecl()].reset(FwdDecl);
return FwdDecl;
}
@@ -1602,7 +1600,6 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCObjectType *Ty,
return getOrCreateType(Ty->getBaseType(), Unit);
}
-
/// \return true if Getter has the default name for the property PD.
static bool hasDefaultGetterName(const ObjCPropertyDecl *PD,
const ObjCMethodDecl *Getter) {
@@ -1612,7 +1609,7 @@ static bool hasDefaultGetterName(const ObjCPropertyDecl *PD,
assert(Getter->getDeclName().isObjCZeroArgSelector());
return PD->getName() ==
- Getter->getDeclName().getObjCSelector().getNameForSlot(0);
+ Getter->getDeclName().getObjCSelector().getNameForSlot(0);
}
/// \return true if Setter has the default name for the property PD.
@@ -1624,7 +1621,7 @@ static bool hasDefaultSetterName(const ObjCPropertyDecl *PD,
assert(Setter->getDeclName().isObjCOneArgSelector());
return SelectorTable::constructSetterName(PD->getName()) ==
- Setter->getDeclName().getObjCSelector().getNameForSlot(0);
+ Setter->getDeclName().getObjCSelector().getNameForSlot(0);
}
/// CreateType - get objective-c interface type.
@@ -1650,11 +1647,11 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
return FwdDecl;
}
-
return CreateTypeDefinition(Ty, Unit);
}
-llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
+ llvm::DIFile Unit) {
ObjCInterfaceDecl *ID = Ty->getDecl();
llvm::DIFile DefUnit = getOrCreateFile(ID->getLocation());
unsigned Line = getLineNumber(ID->getLocation());
@@ -1668,30 +1665,28 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm
if (ID->getImplementation())
Flags |= llvm::DIDescriptor::FlagObjcClassComplete;
- llvm::DICompositeType RealDecl =
- DBuilder.createStructType(Unit, ID->getName(), DefUnit,
- Line, Size, Align, Flags,
- llvm::DIType(), llvm::DIArray(), RuntimeLang);
+ llvm::DICompositeType RealDecl = DBuilder.createStructType(
+ Unit, ID->getName(), DefUnit, Line, Size, Align, Flags, llvm::DIType(),
+ llvm::DIArray(), RuntimeLang);
QualType QTy(Ty, 0);
- TypeCache[QTy.getAsOpaquePtr()] = RealDecl;
+ TypeCache[QTy.getAsOpaquePtr()].reset(RealDecl);
// Push the struct on region stack.
- LexicalBlockStack.push_back(static_cast<llvm::MDNode*>(RealDecl));
- RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
+ LexicalBlockStack.emplace_back(static_cast<llvm::MDNode *>(RealDecl));
+ RegionMap[Ty->getDecl()].reset(RealDecl);
// Convert all the elements.
- SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Metadata *, 16> EltTys;
ObjCInterfaceDecl *SClass = ID->getSuperClass();
if (SClass) {
llvm::DIType SClassTy =
- getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
+ getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
if (!SClassTy.isValid())
return llvm::DIType();
- llvm::DIType InhTag =
- DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
+ llvm::DIType InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
EltTys.push_back(InhTag);
}
@@ -1702,15 +1697,13 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm
unsigned PLine = getLineNumber(Loc);
ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
- llvm::MDNode *PropertyNode =
- DBuilder.createObjCProperty(PD->getName(),
- PUnit, PLine,
- hasDefaultGetterName(PD, Getter) ? "" :
- getSelectorName(PD->getGetterName()),
- hasDefaultSetterName(PD, Setter) ? "" :
- getSelectorName(PD->getSetterName()),
- PD->getPropertyAttributes(),
- getOrCreateType(PD->getType(), PUnit));
+ llvm::MDNode *PropertyNode = DBuilder.createObjCProperty(
+ PD->getName(), PUnit, PLine,
+ hasDefaultGetterName(PD, Getter) ? ""
+ : getSelectorName(PD->getGetterName()),
+ hasDefaultSetterName(PD, Setter) ? ""
+ : getSelectorName(PD->getSetterName()),
+ PD->getPropertyAttributes(), getOrCreateType(PD->getType(), PUnit));
EltTys.push_back(PropertyNode);
}
@@ -1750,8 +1743,8 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm
// non-fragile ABI. For bitfields, use the bit offset into the first
// byte of storage of the bitfield. For other fields, use zero.
if (Field->isBitField()) {
- FieldOffset = CGM.getObjCRuntime().ComputeBitfieldBitOffset(
- CGM, ID, Field);
+ FieldOffset =
+ CGM.getObjCRuntime().ComputeBitfieldBitOffset(CGM, ID, Field);
FieldOffset %= CGM.getContext().getCharWidth();
} else {
FieldOffset = 0;
@@ -1765,38 +1758,38 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm
Flags = llvm::DIDescriptor::FlagProtected;
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
Flags = llvm::DIDescriptor::FlagPrivate;
+ else if (Field->getAccessControl() == ObjCIvarDecl::Public)
+ Flags = llvm::DIDescriptor::FlagPublic;
llvm::MDNode *PropertyNode = nullptr;
if (ObjCImplementationDecl *ImpD = ID->getImplementation()) {
if (ObjCPropertyImplDecl *PImpD =
- ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) {
+ ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) {
if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) {
SourceLocation Loc = PD->getLocation();
llvm::DIFile PUnit = getOrCreateFile(Loc);
unsigned PLine = getLineNumber(Loc);
ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
- PropertyNode =
- DBuilder.createObjCProperty(PD->getName(),
- PUnit, PLine,
- hasDefaultGetterName(PD, Getter) ? "" :
- getSelectorName(PD->getGetterName()),
- hasDefaultSetterName(PD, Setter) ? "" :
- getSelectorName(PD->getSetterName()),
- PD->getPropertyAttributes(),
- getOrCreateType(PD->getType(), PUnit));
+ PropertyNode = DBuilder.createObjCProperty(
+ PD->getName(), PUnit, PLine,
+ hasDefaultGetterName(PD, Getter) ? "" : getSelectorName(
+ PD->getGetterName()),
+ hasDefaultSetterName(PD, Setter) ? "" : getSelectorName(
+ PD->getSetterName()),
+ PD->getPropertyAttributes(),
+ getOrCreateType(PD->getType(), PUnit));
}
}
}
- FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit,
- FieldLine, FieldSize, FieldAlign,
- FieldOffset, Flags, FieldTy,
- PropertyNode);
+ FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit, FieldLine,
+ FieldSize, FieldAlign, FieldOffset, Flags,
+ FieldTy, PropertyNode);
EltTys.push_back(FieldTy);
}
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
- RealDecl.setTypeArray(Elements);
+ DBuilder.replaceArrays(RealDecl, Elements);
LexicalBlockStack.pop_back();
return RealDecl;
@@ -1810,7 +1803,7 @@ llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) {
// Use Count == -1 to express such arrays.
Count = -1;
- llvm::Value *Subscript = DBuilder.getOrCreateSubrange(0, Count);
+ llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange(0, Count);
llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
@@ -1819,8 +1812,7 @@ llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) {
return DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray);
}
-llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile Unit) {
uint64_t Size;
uint64_t Align;
@@ -1828,7 +1820,7 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
Size = 0;
Align =
- CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
+ CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
} else if (Ty->isIncompleteArrayType()) {
Size = 0;
if (Ty->getElementType()->isIncompleteType())
@@ -1847,7 +1839,7 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
// Add the dimensions of the array. FIXME: This loses CV qualifiers from
// interior arrays, do we care? Why aren't nested arrays represented the
// obvious/recursive way?
- SmallVector<llvm::Value *, 8> Subscripts;
+ SmallVector<llvm::Metadata *, 8> Subscripts;
QualType EltTy(Ty, 0);
while ((Ty = dyn_cast<ArrayType>(EltTy))) {
// If the number of elements is known, then count is that number. Otherwise,
@@ -1857,7 +1849,7 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
// struct foo {
// int x[0];
// };
- int64_t Count = -1; // Count == -1 is an unbounded array.
+ int64_t Count = -1; // Count == -1 is an unbounded array.
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
Count = CAT->getSize().getZExtValue();
@@ -1868,22 +1860,21 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
- llvm::DIType DbgTy =
- DBuilder.createArrayType(Size, Align, getOrCreateType(EltTy, Unit),
- SubscriptArray);
+ llvm::DIType DbgTy = DBuilder.createArrayType(
+ Size, Align, getOrCreateType(EltTy, Unit), SubscriptArray);
return DbgTy;
}
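// For illustration, a minimal sketch of the subscript counts produced above:
// a constant bound gives an exact count, anything without a constant bound is
// recorded as count -1 (an unbounded subrange), and each nested dimension
// contributes its own subscript.
//
//   int fixed[4];             // count = 4
//   extern int incomplete[];  // count = -1
//   int grid[2][3];           // subscripts: 2, then 3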
llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty,
llvm::DIFile Unit) {
- return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type,
- Ty, Ty->getPointeeType(), Unit);
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type, Ty,
+ Ty->getPointeeType(), Unit);
}
llvm::DIType CGDebugInfo::CreateType(const RValueReferenceType *Ty,
llvm::DIFile Unit) {
- return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type,
- Ty, Ty->getPointeeType(), Unit);
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type, Ty,
+ Ty->getPointeeType(), Unit);
}
llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
@@ -1891,18 +1882,19 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
llvm::DIType ClassType = getOrCreateType(QualType(Ty->getClass(), 0), U);
if (!Ty->getPointeeType()->isFunctionType())
return DBuilder.createMemberPointerType(
- getOrCreateType(Ty->getPointeeType(), U), ClassType);
+ getOrCreateType(Ty->getPointeeType(), U), ClassType,
+ CGM.getContext().getTypeSize(Ty));
const FunctionProtoType *FPT =
- Ty->getPointeeType()->getAs<FunctionProtoType>();
- return DBuilder.createMemberPointerType(getOrCreateInstanceMethodType(
- CGM.getContext().getPointerType(QualType(Ty->getClass(),
- FPT->getTypeQuals())),
- FPT, U), ClassType);
+ Ty->getPointeeType()->getAs<FunctionProtoType>();
+ return DBuilder.createMemberPointerType(
+ getOrCreateInstanceMethodType(CGM.getContext().getPointerType(QualType(
+ Ty->getClass(), FPT->getTypeQuals())),
+ FPT, U),
+ ClassType, CGM.getContext().getTypeSize(Ty));
}
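// For illustration, a minimal sketch of the two member-pointer forms handled
// above; with this change both also record the member pointer's own size,
// which can differ between data and function member pointers depending on
// the ABI.
//
//   struct S { int d; void f(); };
//   int  S::*pd     = &S::d;  // pointer to data member
//   void (S::*pf)() = &S::f;  // pointer to member function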
-llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
- llvm::DIFile U) {
+llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile U) {
// Ignore the atomic wrapping
// FIXME: What is the correct representation?
return getOrCreateType(Ty->getValueType(), U);
@@ -1931,7 +1923,9 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumType *Ty) {
llvm::DIType RetTy = DBuilder.createReplaceableForwardDecl(
llvm::dwarf::DW_TAG_enumeration_type, EDName, EDContext, DefUnit, Line,
0, Size, Align, FullName);
- ReplaceMap.push_back(std::make_pair(Ty, static_cast<llvm::Value *>(RetTy)));
+ ReplaceMap.emplace_back(
+ std::piecewise_construct, std::make_tuple(Ty),
+ std::make_tuple(static_cast<llvm::Metadata *>(RetTy)));
return RetTy;
}
@@ -1950,12 +1944,11 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
// Create DIEnumerator elements for each enumerator.
- SmallVector<llvm::Value *, 16> Enumerators;
+ SmallVector<llvm::Metadata *, 16> Enumerators;
ED = ED->getDefinition();
for (const auto *Enum : ED->enumerators()) {
- Enumerators.push_back(
- DBuilder.createEnumerator(Enum->getName(),
- Enum->getInitVal().getSExtValue()));
+ Enumerators.push_back(DBuilder.createEnumerator(
+ Enum->getName(), Enum->getInitVal().getSExtValue()));
}
// Return a CompositeType for the enum itself.
@@ -1964,13 +1957,13 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
unsigned Line = getLineNumber(ED->getLocation());
llvm::DIDescriptor EnumContext =
- getContextDescriptor(cast<Decl>(ED->getDeclContext()));
- llvm::DIType ClassTy = ED->isFixed() ?
- getOrCreateType(ED->getIntegerType(), DefUnit) : llvm::DIType();
+ getContextDescriptor(cast<Decl>(ED->getDeclContext()));
+ llvm::DIType ClassTy = ED->isFixed()
+ ? getOrCreateType(ED->getIntegerType(), DefUnit)
+ : llvm::DIType();
llvm::DIType DbgTy =
- DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
- Size, Align, EltArray,
- ClassTy, FullName);
+ DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
+ Size, Align, EltArray, ClassTy, FullName);
return DbgTy;
}
@@ -1991,7 +1984,8 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
if (Spec->isTypeAlias())
return C.getQualifiedType(T.getTypePtr(), Quals);
T = Spec->desugar();
- break; }
+ break;
+ }
case Type::TypeOfExpr:
T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
break;
@@ -2018,8 +2012,7 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
break;
case Type::Auto:
QualType DT = cast<AutoType>(T)->getDeducedType();
- if (DT.isNull())
- return T;
+ assert(!DT.isNull() && "Undeduced types shouldn't reach here.");
T = DT;
break;
}
@@ -2039,7 +2032,7 @@ llvm::DIType CGDebugInfo::getTypeOrNull(QualType Ty) {
auto it = TypeCache.find(Ty.getAsOpaquePtr());
if (it != TypeCache.end()) {
// Verify that the debug info still exists.
- if (llvm::Value *V = it->second)
+ if (llvm::Metadata *V = it->second)
return llvm::DIType(cast<llvm::MDNode>(V));
}
@@ -2071,10 +2064,10 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
// Otherwise create the type.
llvm::DIType Res = CreateTypeNode(Ty, Unit);
- void* TyPtr = Ty.getAsOpaquePtr();
+ void *TyPtr = Ty.getAsOpaquePtr();
// And update the type cache.
- TypeCache[TyPtr] = Res;
+ TypeCache[TyPtr].reset(Res);
return Res;
}
@@ -2096,8 +2089,8 @@ unsigned CGDebugInfo::Checksum(const ObjCInterfaceDecl *ID) {
ObjCInterfaceDecl *CGDebugInfo::getObjCInterfaceDecl(QualType Ty) {
switch (Ty->getTypeClass()) {
case Type::ObjCObjectPointer:
- return getObjCInterfaceDecl(cast<ObjCObjectPointerType>(Ty)
- ->getPointeeType());
+ return getObjCInterfaceDecl(
+ cast<ObjCObjectPointerType>(Ty)->getPointeeType());
case Type::ObjCInterface:
return cast<ObjCInterfaceType>(Ty)->getDecl();
default:
@@ -2111,8 +2104,6 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
if (Ty.hasLocalQualifiers())
return CreateQualifiedType(Ty, Unit);
- const char *Diag = nullptr;
-
// Work out details of type.
switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
@@ -2172,6 +2163,7 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
case Type::TemplateSpecialization:
return CreateType(cast<TemplateSpecializationType>(Ty), Unit);
+ case Type::Auto:
case Type::Attributed:
case Type::Elaborated:
case Type::Paren:
@@ -2181,18 +2173,10 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
case Type::Decltype:
case Type::UnaryTransform:
case Type::PackExpansion:
- llvm_unreachable("type should have been unwrapped!");
- case Type::Auto:
- Diag = "auto";
break;
}
- assert(Diag && "Fall through without a diagnostic?");
- unsigned DiagID = CGM.getDiags().getCustomDiagID(DiagnosticsEngine::Error,
- "debug information for %0 is not yet supported");
- CGM.getDiags().Report(DiagID)
- << Diag;
- return llvm::DIType();
+ llvm_unreachable("type should have been unwrapped!");
}
/// getOrCreateLimitedType - Get the type from the cache or create a new
@@ -2206,7 +2190,8 @@ llvm::DIType CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty,
// We may have cached a forward decl when we could have created
// a non-forward decl. Go ahead and create a non-forward decl
// now.
- if (T && !T.isForwardDecl()) return T;
+ if (T && !T.isForwardDecl())
+ return T;
// Otherwise create the type.
llvm::DICompositeType Res = CreateLimitedType(Ty);
@@ -2214,10 +2199,10 @@ llvm::DIType CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty,
// Propagate members from the declaration to the definition
// CreateType(const RecordType*) will overwrite this with the members in the
// correct order if the full type is needed.
- Res.setTypeArray(T.getTypeArray());
+ DBuilder.replaceArrays(Res, T.getElements());
// And update the type cache.
- TypeCache[QTy.getAsOpaquePtr()] = Res;
+ TypeCache[QTy.getAsOpaquePtr()].reset(Res);
return Res;
}
@@ -2237,7 +2222,7 @@ llvm::DICompositeType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// just return that.
llvm::DICompositeType T(getTypeOrNull(CGM.getContext().getRecordType(RD)));
if (T && (!T.isForwardDecl() || !RD->getDefinition()))
- return T;
+ return T;
// If this is just a forward or incomplete declaration, construct an
// appropriately marked node and just return it.
@@ -2252,29 +2237,26 @@ llvm::DICompositeType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
if (RD->isUnion())
- RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, llvm::DIArray(), 0,
- FullName);
+ RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line, Size,
+ Align, 0, llvm::DIArray(), 0, FullName);
else if (RD->isClass()) {
// FIXME: This could be a struct type giving a default visibility different
// than C++ class type, but needs llvm metadata changes first.
- RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, 0, llvm::DIType(),
- llvm::DIArray(), llvm::DIType(),
- llvm::DIArray(), FullName);
+ RealDecl = DBuilder.createClassType(
+ RDContext, RDName, DefUnit, Line, Size, Align, 0, 0, llvm::DIType(),
+ llvm::DIArray(), llvm::DIType(), llvm::DIArray(), FullName);
} else
- RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, llvm::DIType(),
- llvm::DIArray(), 0, llvm::DIType(),
- FullName);
+ RealDecl = DBuilder.createStructType(
+ RDContext, RDName, DefUnit, Line, Size, Align, 0, llvm::DIType(),
+ llvm::DIArray(), 0, llvm::DIType(), FullName);
- RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
+ RegionMap[Ty->getDecl()].reset(RealDecl);
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()].reset(RealDecl);
if (const ClassTemplateSpecializationDecl *TSpecial =
dyn_cast<ClassTemplateSpecializationDecl>(RD))
- RealDecl.setTypeArray(llvm::DIArray(),
- CollectCXXTemplateParams(TSpecial, DefUnit));
+ DBuilder.replaceArrays(RealDecl, llvm::DIArray(),
+ CollectCXXTemplateParams(TSpecial, DefUnit));
return RealDecl;
}
@@ -2299,24 +2281,148 @@ void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
} else if (RD->isDynamicClass())
ContainingType = RealDecl;
- RealDecl.setContainingType(ContainingType);
+ DBuilder.replaceVTableHolder(RealDecl, ContainingType);
}
/// CreateMemberType - Create new member and increase Offset by FType's size.
llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
- StringRef Name,
- uint64_t *Offset) {
+ StringRef Name, uint64_t *Offset) {
llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
- llvm::DIType Ty = DBuilder.createMemberType(Unit, Name, Unit, 0,
- FieldSize, FieldAlign,
- *Offset, 0, FieldTy);
+ llvm::DIType Ty = DBuilder.createMemberType(Unit, Name, Unit, 0, FieldSize,
+ FieldAlign, *Offset, 0, FieldTy);
*Offset += FieldSize;
return Ty;
}
-llvm::DIScope CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
+void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD,
+ llvm::DIFile Unit,
+ StringRef &Name, StringRef &LinkageName,
+ llvm::DIDescriptor &FDContext,
+ llvm::DIArray &TParamsArray,
+ unsigned &Flags) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+ Name = getFunctionName(FD);
+ // Use mangled name as linkage name for C/C++ functions.
+ if (FD->hasPrototype()) {
+ LinkageName = CGM.getMangledName(GD);
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ }
+ // There is no need to replicate the linkage name if it is the same as the
+ // subprogram name, and no need to emit it at all unless coverage is enabled
+ // or debug info is set to more than just line tables.
+ if (LinkageName == Name ||
+ (!CGM.getCodeGenOpts().EmitGcovArcs &&
+ !CGM.getCodeGenOpts().EmitGcovNotes &&
+ DebugKind <= CodeGenOptions::DebugLineTablesOnly))
+ LinkageName = StringRef();
+
+ if (DebugKind >= CodeGenOptions::LimitedDebugInfo) {
+ if (const NamespaceDecl *NSDecl =
+ dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
+ FDContext = getOrCreateNameSpace(NSDecl);
+ else if (const RecordDecl *RDecl =
+ dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
+ FDContext = getContextDescriptor(cast<Decl>(RDecl));
+ // Collect template parameters.
+ TParamsArray = CollectFunctionTemplateParams(FD, Unit);
+ }
+}
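// For illustration, a minimal sketch of the linkage-name rule above on an
// Itanium-ABI target: a mangled name identical to the source name (typical
// for C functions) is always dropped, and even a distinct mangled name is
// dropped when neither coverage nor more-than-line-table debug info asks
// for it.
//
//   extern "C" int c_func(int);  // linkage name "c_func" == name -> dropped
//   int cxx_func(int);           // "_Z8cxx_funci" kept with full debug info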
+
+void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile &Unit,
+ unsigned &LineNo, QualType &T,
+ StringRef &Name, StringRef &LinkageName,
+ llvm::DIDescriptor &VDContext) {
+ Unit = getOrCreateFile(VD->getLocation());
+ LineNo = getLineNumber(VD->getLocation());
+
+ setLocation(VD->getLocation());
+
+ T = VD->getType();
+ if (T->isIncompleteArrayType()) {
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APInt ConstVal(32, 1);
+ QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
+
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+
+ Name = VD->getName();
+ if (VD->getDeclContext() && !isa<FunctionDecl>(VD->getDeclContext()) &&
+ !isa<ObjCMethodDecl>(VD->getDeclContext()))
+ LinkageName = CGM.getMangledName(VD);
+ if (LinkageName == Name)
+ LinkageName = StringRef();
+
+ // Since we emit declarations (DW_TAG_member) for static members, place the
+ // definition of those static members in the namespace in which they were
+ // declared in the source code (the lexical decl context).
+ // FIXME: Generalize this for even non-member global variables where the
+ // declaration and definition may have different lexical decl contexts, once
+ // we have support for emitting declarations of (non-member) global variables.
+ VDContext = getContextDescriptor(
+ dyn_cast<Decl>(VD->isStaticDataMember() ? VD->getLexicalDeclContext()
+ : VD->getDeclContext()));
+}
+
+llvm::DISubprogram
+CGDebugInfo::getFunctionForwardDeclaration(const FunctionDecl *FD) {
+ llvm::DIArray TParamsArray;
+ StringRef Name, LinkageName;
+ unsigned Flags = 0;
+ SourceLocation Loc = FD->getLocation();
+ llvm::DIFile Unit = getOrCreateFile(Loc);
+ llvm::DIDescriptor DContext(Unit);
+ unsigned Line = getLineNumber(Loc);
+
+ collectFunctionDeclProps(FD, Unit, Name, LinkageName, DContext,
+ TParamsArray, Flags);
+ // Build function type.
+ SmallVector<QualType, 16> ArgTypes;
+ for (const ParmVarDecl *Parm : FD->parameters())
+ ArgTypes.push_back(Parm->getType());
+ QualType FnType =
+ CGM.getContext().getFunctionType(FD->getReturnType(), ArgTypes,
+ FunctionProtoType::ExtProtoInfo());
+ llvm::DISubprogram SP =
+ DBuilder.createTempFunctionFwdDecl(DContext, Name, LinkageName, Unit, Line,
+ getOrCreateFunctionType(FD, FnType, Unit),
+ !FD->isExternallyVisible(),
+ false /*declaration*/, 0, Flags,
+ CGM.getLangOpts().Optimize, nullptr,
+ TParamsArray, getFunctionDeclaration(FD));
+ const FunctionDecl *CanonDecl = cast<FunctionDecl>(FD->getCanonicalDecl());
+ FwdDeclReplaceMap.emplace_back(
+ std::piecewise_construct, std::make_tuple(CanonDecl),
+ std::make_tuple(static_cast<llvm::Metadata *>(SP)));
+ return SP;
+}
+
+llvm::DIGlobalVariable
+CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) {
+ QualType T;
+ StringRef Name, LinkageName;
+ SourceLocation Loc = VD->getLocation();
+ llvm::DIFile Unit = getOrCreateFile(Loc);
+ llvm::DIDescriptor DContext(Unit);
+ unsigned Line = getLineNumber(Loc);
+
+ collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, DContext);
+ llvm::DIGlobalVariable GV =
+ DBuilder.createTempGlobalVariableFwdDecl(DContext, Name, LinkageName, Unit,
+ Line, getOrCreateType(T, Unit),
+ !VD->isExternallyVisible(),
+ nullptr, nullptr);
+ FwdDeclReplaceMap.emplace_back(
+ std::piecewise_construct,
+ std::make_tuple(cast<VarDecl>(VD->getCanonicalDecl())),
+ std::make_tuple(static_cast<llvm::Metadata *>(GV)));
+ return GV;
+}
+
+llvm::DIDescriptor CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
// We only need a declaration (not a definition) of the type - so use whatever
// we would otherwise do to get a type for a pointee. (forward declarations in
// limited debug info, full definitions (if the type definition is available)
@@ -2324,19 +2430,19 @@ llvm::DIScope CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
if (const TypeDecl *TD = dyn_cast<TypeDecl>(D))
return getOrCreateType(CGM.getContext().getTypeDeclType(TD),
getOrCreateFile(TD->getLocation()));
- // Otherwise fall back to a fairly rudimentary cache of existing declarations.
- // This doesn't handle providing declarations (for functions or variables) for
- // entities without definitions in this TU, nor when the definition proceeds
- // the call to this function.
- // FIXME: This should be split out into more specific maps with support for
- // emitting forward declarations and merging definitions with declarations,
- // the same way as we do for types.
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator I =
- DeclCache.find(D->getCanonicalDecl());
- if (I == DeclCache.end())
- return llvm::DIScope();
- llvm::Value *V = I->second;
- return llvm::DIScope(dyn_cast_or_null<llvm::MDNode>(V));
+ auto I = DeclCache.find(D->getCanonicalDecl());
+
+ if (I != DeclCache.end())
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(I->second));
+
+ // No definition is available yet. Emit a forward declaration that may later
+ // be merged with the actual definition.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ return getFunctionForwardDeclaration(FD);
+ else if (const auto *VD = dyn_cast<VarDecl>(D))
+ return getGlobalVariableForwardDeclaration(VD);
+
+ return llvm::DIDescriptor();
}
/// getFunctionDeclaration - Return debug info descriptor to describe method
@@ -2346,13 +2452,13 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
return llvm::DISubprogram();
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- if (!FD) return llvm::DISubprogram();
+ if (!FD)
+ return llvm::DISubprogram();
// Setup context.
llvm::DIScope S = getContextDescriptor(cast<Decl>(D->getDeclContext()));
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- MI = SPCache.find(FD->getCanonicalDecl());
+ auto MI = SPCache.find(FD->getCanonicalDecl());
if (MI == SPCache.end()) {
if (const CXXMethodDecl *MD =
dyn_cast<CXXMethodDecl>(FD->getCanonicalDecl())) {
@@ -2363,18 +2469,15 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
}
}
if (MI != SPCache.end()) {
- llvm::Value *V = MI->second;
- llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(V));
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(MI->second));
if (SP.isSubprogram() && !SP.isDefinition())
return SP;
}
for (auto NextFD : FD->redecls()) {
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- MI = SPCache.find(NextFD->getCanonicalDecl());
+ auto MI = SPCache.find(NextFD->getCanonicalDecl());
if (MI != SPCache.end()) {
- llvm::Value *V = MI->second;
- llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(V));
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(MI->second));
if (SP.isSubprogram() && !SP.isDefinition())
return SP;
}
@@ -2392,13 +2495,14 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
// llvm::DISubprogram::Verify() would return false, and
// subprogram DIE will miss DW_AT_decl_file and
// DW_AT_decl_line fields.
- return DBuilder.createSubroutineType(F, DBuilder.getOrCreateArray(None));
+ return DBuilder.createSubroutineType(F,
+ DBuilder.getOrCreateTypeArray(None));
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
return getOrCreateMethodType(Method, F);
if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
// Add "self" and "_cmd"
- SmallVector<llvm::Value *, 16> Elts;
+ SmallVector<llvm::Metadata *, 16> Elts;
// First element is always return type. For 'void' functions it is NULL.
QualType ResultTy = OMethod->getReturnType();
@@ -2406,7 +2510,7 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
// Replace the instancetype keyword with the actual type.
if (ResultTy == CGM.getContext().getObjCInstanceType())
ResultTy = CGM.getContext().getPointerType(
- QualType(OMethod->getClassInterface()->getTypeForDecl(), 0));
+ QualType(OMethod->getClassInterface()->getTypeForDecl(), 0));
Elts.push_back(getOrCreateType(ResultTy, F));
// "self" pointer is always first argument.
@@ -2419,8 +2523,11 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
// Get rest of the arguments.
for (const auto *PI : OMethod->params())
Elts.push_back(getOrCreateType(PI->getType(), F));
+ // Variadic methods need a special marker at the end of the type list.
+ if (OMethod->isVariadic())
+ Elts.push_back(DBuilder.createUnspecifiedParameter());
- llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
+ llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
return DBuilder.createSubroutineType(F, EltTypeArray);
}
@@ -2428,13 +2535,13 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
// unspecified parameter.
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
if (FD->isVariadic()) {
- SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Metadata *, 16> EltTys;
EltTys.push_back(getOrCreateType(FD->getReturnType(), F));
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FnType))
for (unsigned i = 0, e = FPT->getNumParams(); i != e; ++i)
EltTys.push_back(getOrCreateType(FPT->getParamType(i), F));
EltTys.push_back(DBuilder.createUnspecifiedParameter());
- llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(EltTys);
+ llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
return DBuilder.createSubroutineType(F, EltTypeArray);
}
@@ -2442,12 +2549,9 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
}
/// EmitFunctionStart - Constructs the debug code for entering a function.
-void CGDebugInfo::EmitFunctionStart(GlobalDecl GD,
- SourceLocation Loc,
- SourceLocation ScopeLoc,
- QualType FnType,
- llvm::Function *Fn,
- CGBuilderTy &Builder) {
+void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
+ SourceLocation ScopeLoc, QualType FnType,
+ llvm::Function *Fn, CGBuilderTy &Builder) {
StringRef Name;
StringRef LinkageName;
@@ -2466,44 +2570,18 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD,
LinkageName = Fn->getName();
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// If there is a DISubprogram for this function available then use it.
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- FI = SPCache.find(FD->getCanonicalDecl());
+ auto FI = SPCache.find(FD->getCanonicalDecl());
if (FI != SPCache.end()) {
- llvm::Value *V = FI->second;
- llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(V));
+ llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(FI->second));
if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
llvm::MDNode *SPN = SP;
- LexicalBlockStack.push_back(SPN);
- RegionMap[D] = llvm::WeakVH(SP);
+ LexicalBlockStack.emplace_back(SPN);
+ RegionMap[D].reset(SP);
return;
}
}
- Name = getFunctionName(FD);
- // Use mangled name as linkage name for C/C++ functions.
- if (FD->hasPrototype()) {
- LinkageName = CGM.getMangledName(GD);
- Flags |= llvm::DIDescriptor::FlagPrototyped;
- }
- // No need to replicate the linkage name if it isn't different from the
- // subprogram name, no need to have it at all unless coverage is enabled or
- // debug is set to more than just line tables.
- if (LinkageName == Name ||
- (!CGM.getCodeGenOpts().EmitGcovArcs &&
- !CGM.getCodeGenOpts().EmitGcovNotes &&
- DebugKind <= CodeGenOptions::DebugLineTablesOnly))
- LinkageName = StringRef();
-
- if (DebugKind >= CodeGenOptions::LimitedDebugInfo) {
- if (const NamespaceDecl *NSDecl =
- dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
- FDContext = getOrCreateNameSpace(NSDecl);
- else if (const RecordDecl *RDecl =
- dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
- FDContext = getContextDescriptor(cast<Decl>(RDecl));
-
- // Collect template parameters.
- TParamsArray = CollectFunctionTemplateParams(FD, Unit);
- }
+ collectFunctionDeclProps(GD, Unit, Name, LinkageName, FDContext,
+ TParamsArray, Flags);
} else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
Name = getObjCMethodName(OMD);
Flags |= llvm::DIDescriptor::FlagPrototyped;
@@ -2529,22 +2607,23 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD,
// FunctionDecls. When/if we fix this we can have FDContext be TheCU/null for
// all subprograms instead of the actual context since subprogram definitions
// are emitted as CU level entities by the backend.
- llvm::DISubprogram SP =
- DBuilder.createFunction(FDContext, Name, LinkageName, Unit, LineNo,
- getOrCreateFunctionType(D, FnType, Unit),
- Fn->hasInternalLinkage(), true /*definition*/,
- ScopeLine, Flags,
- CGM.getLangOpts().Optimize, Fn, TParamsArray,
- getFunctionDeclaration(D));
- if (HasDecl)
- DeclCache.insert(std::make_pair(D->getCanonicalDecl(), llvm::WeakVH(SP)));
+ llvm::DISubprogram SP = DBuilder.createFunction(
+ FDContext, Name, LinkageName, Unit, LineNo,
+ getOrCreateFunctionType(D, FnType, Unit), Fn->hasInternalLinkage(),
+ true /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize, Fn,
+ TParamsArray, getFunctionDeclaration(D));
+ // We might get here with a VarDecl in the case we're generating
+ // code for the initialization of globals. Do not record these decls
+ // as they will overwrite the actual VarDecl Decl in the cache.
+ if (HasDecl && isa<FunctionDecl>(D))
+ DeclCache[D->getCanonicalDecl()].reset(static_cast<llvm::Metadata *>(SP));
// Push the function onto the lexical block stack.
llvm::MDNode *SPN = SP;
- LexicalBlockStack.push_back(SPN);
+ LexicalBlockStack.emplace_back(SPN);
if (HasDecl)
- RegionMap[D] = llvm::WeakVH(SP);
+ RegionMap[D].reset(SP);
}
/// EmitLocation - Emit metadata to indicate a change in line/column
@@ -2555,38 +2634,25 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc,
// Update our current location
setLocation(Loc);
- if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
-
- // Don't bother if things are the same as last time.
- SourceManager &SM = CGM.getContext().getSourceManager();
- if (CurLoc == PrevLoc ||
- SM.getExpansionLoc(CurLoc) == SM.getExpansionLoc(PrevLoc))
- // New Builder may not be in sync with CGDebugInfo.
- if (!Builder.getCurrentDebugLocation().isUnknown() &&
- Builder.getCurrentDebugLocation().getScope(CGM.getLLVMContext()) ==
- LexicalBlockStack.back())
- return;
-
- // Update last state.
- PrevLoc = CurLoc;
+ if (CurLoc.isInvalid() || CurLoc.isMacroID())
+ return;
llvm::MDNode *Scope = LexicalBlockStack.back();
- Builder.SetCurrentDebugLocation(llvm::DebugLoc::get
- (getLineNumber(CurLoc),
- getColumnNumber(CurLoc, ForceColumnInfo),
- Scope));
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(
+ getLineNumber(CurLoc), getColumnNumber(CurLoc, ForceColumnInfo), Scope));
}
/// CreateLexicalBlock - Creates a new lexical block node and pushes it on
/// the stack.
void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
+ llvm::MDNode *Back = nullptr;
+ if (!LexicalBlockStack.empty())
+ Back = LexicalBlockStack.back().get();
llvm::DIDescriptor D = DBuilder.createLexicalBlock(
- llvm::DIDescriptor(LexicalBlockStack.empty() ? nullptr
- : LexicalBlockStack.back()),
- getOrCreateFile(CurLoc), getLineNumber(CurLoc), getColumnNumber(CurLoc),
- 0);
+ llvm::DIDescriptor(Back), getOrCreateFile(CurLoc), getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
llvm::MDNode *DN = D;
- LexicalBlockStack.push_back(DN);
+ LexicalBlockStack.emplace_back(DN);
}
/// EmitLexicalBlockStart - Constructs the debug code for entering a declarative
@@ -2596,13 +2662,15 @@ void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder,
// Set our current location.
setLocation(Loc);
+ // Emit a line table change for the current location inside the new scope.
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(
+ getLineNumber(Loc), getColumnNumber(Loc), LexicalBlockStack.back()));
+
+ if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return;
+
// Create a new lexical block and push it on the stack.
CreateLexicalBlock(Loc);
-
- // Emit a line table change for the current location inside the new scope.
- Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(Loc),
- getColumnNumber(Loc),
- LexicalBlockStack.back()));
}
/// EmitLexicalBlockEnd - Constructs the debug code for exiting a declarative
@@ -2614,6 +2682,9 @@ void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder,
// Provide an entry in the line table for the end of the block.
EmitLocation(Builder, Loc);
+ if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return;
+
LexicalBlockStack.pop_back();
}
@@ -2624,8 +2695,11 @@ void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
assert(RCount <= LexicalBlockStack.size() && "Region stack mismatch");
// Pop all regions for this function.
- while (LexicalBlockStack.size() != RCount)
- EmitLexicalBlockEnd(Builder, CurLoc);
+ while (LexicalBlockStack.size() != RCount) {
+ // Provide an entry in the line table for the end of the block.
+ EmitLocation(Builder, CurLoc);
+ LexicalBlockStack.pop_back();
+ }
FnBeginRegionCount.pop_back();
}
@@ -2634,7 +2708,7 @@ void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
uint64_t *XOffset) {
- SmallVector<llvm::Value *, 5> EltTys;
+ SmallVector<llvm::Metadata *, 5> EltTys;
QualType FType;
uint64_t FieldSize, FieldOffset;
unsigned FieldAlign;
@@ -2653,31 +2727,29 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
bool HasCopyAndDispose = CGM.getContext().BlockRequiresCopying(Type, VD);
if (HasCopyAndDispose) {
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- EltTys.push_back(CreateMemberType(Unit, FType, "__copy_helper",
- &FieldOffset));
- EltTys.push_back(CreateMemberType(Unit, FType, "__destroy_helper",
- &FieldOffset));
+ EltTys.push_back(
+ CreateMemberType(Unit, FType, "__copy_helper", &FieldOffset));
+ EltTys.push_back(
+ CreateMemberType(Unit, FType, "__destroy_helper", &FieldOffset));
}
bool HasByrefExtendedLayout;
Qualifiers::ObjCLifetime Lifetime;
- if (CGM.getContext().getByrefLifetime(Type,
- Lifetime, HasByrefExtendedLayout)
- && HasByrefExtendedLayout) {
+ if (CGM.getContext().getByrefLifetime(Type, Lifetime,
+ HasByrefExtendedLayout) &&
+ HasByrefExtendedLayout) {
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- EltTys.push_back(CreateMemberType(Unit, FType,
- "__byref_variable_layout",
- &FieldOffset));
+ EltTys.push_back(
+ CreateMemberType(Unit, FType, "__byref_variable_layout", &FieldOffset));
}
CharUnits Align = CGM.getContext().getDeclAlign(VD);
if (Align > CGM.getContext().toCharUnitsFromBits(
- CGM.getTarget().getPointerAlign(0))) {
- CharUnits FieldOffsetInBytes
- = CGM.getContext().toCharUnitsFromBits(FieldOffset);
- CharUnits AlignedOffsetInBytes
- = FieldOffsetInBytes.RoundUpToAlignment(Align);
- CharUnits NumPaddingBytes
- = AlignedOffsetInBytes - FieldOffsetInBytes;
+ CGM.getTarget().getPointerAlign(0))) {
+ CharUnits FieldOffsetInBytes =
+ CGM.getContext().toCharUnitsFromBits(FieldOffset);
+ CharUnits AlignedOffsetInBytes =
+ FieldOffsetInBytes.RoundUpToAlignment(Align);
+ CharUnits NumPaddingBytes = AlignedOffsetInBytes - FieldOffsetInBytes;
if (NumPaddingBytes.isPositive()) {
llvm::APInt pad(32, NumPaddingBytes.getQuantity());
@@ -2693,9 +2765,8 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
FieldAlign = CGM.getContext().toBits(Align);
*XOffset = FieldOffset;
- FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
+ FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit, 0, FieldSize,
+ FieldAlign, FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
@@ -2709,8 +2780,8 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
/// EmitDeclare - Emit local variable declaration debug info.
void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag,
- llvm::Value *Storage,
- unsigned ArgNo, CGBuilderTy &Builder) {
+ llvm::Value *Storage, unsigned ArgNo,
+ CGBuilderTy &Builder) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
@@ -2760,29 +2831,26 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag,
if (!Name.empty()) {
if (VD->hasAttr<BlocksAttr>()) {
CharUnits offset = CharUnits::fromQuantity(32);
- SmallVector<llvm::Value *, 9> addr;
- llvm::Type *Int64Ty = CGM.Int64Ty;
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ SmallVector<int64_t, 9> addr;
+ addr.push_back(llvm::dwarf::DW_OP_plus);
// offset of __forwarding field
offset = CGM.getContext().toCharUnitsFromBits(
- CGM.getTarget().getPointerWidth(0));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ CGM.getTarget().getPointerWidth(0));
+ addr.push_back(offset.getQuantity());
+ addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_plus);
// offset of x field
offset = CGM.getContext().toCharUnitsFromBits(XOffset);
- addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(offset.getQuantity());
// Create the descriptor for the variable.
- llvm::DIVariable D =
- DBuilder.createComplexVariable(Tag,
- llvm::DIDescriptor(Scope),
- VD->getName(), Unit, Line, Ty,
- addr, ArgNo);
+ llvm::DIVariable D = DBuilder.createLocalVariable(
+ Tag, llvm::DIDescriptor(Scope), VD->getName(), Unit, Line, Ty, ArgNo);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
- DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr),
+ Builder.GetInsertBlock());
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
return;
} else if (isa<VariableArrayType>(VD->getType()))
@@ -2801,15 +2869,13 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag,
continue;
// Use VarDecl's Tag, Scope and Line number.
- llvm::DIVariable D =
- DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
- FieldName, Unit, Line, FieldTy,
- CGM.getLangOpts().Optimize, Flags,
- ArgNo);
+ llvm::DIVariable D = DBuilder.createLocalVariable(
+ Tag, llvm::DIDescriptor(Scope), FieldName, Unit, Line, FieldTy,
+ CGM.getLangOpts().Optimize, Flags, ArgNo);
// Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *Call =
- DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ llvm::Instruction *Call = DBuilder.insertDeclare(
+ Storage, D, DBuilder.createExpression(), Builder.GetInsertBlock());
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
}
return;
@@ -2817,14 +2883,13 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag,
}
// Create the descriptor for the variable.
- llvm::DIVariable D =
- DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
- Name, Unit, Line, Ty,
- CGM.getLangOpts().Optimize, Flags, ArgNo);
+ llvm::DIVariable D = DBuilder.createLocalVariable(
+ Tag, llvm::DIDescriptor(Scope), Name, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags, ArgNo);
// Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *Call =
- DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ llvm::Instruction *Call = DBuilder.insertDeclare(
+ Storage, D, DBuilder.createExpression(), Builder.GetInsertBlock());
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
}
@@ -2844,14 +2909,14 @@ void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
llvm::DIType CGDebugInfo::CreateSelfType(const QualType &QualTy,
llvm::DIType Ty) {
llvm::DIType CachedTy = getTypeOrNull(QualTy);
- if (CachedTy) Ty = CachedTy;
+ if (CachedTy)
+ Ty = CachedTy;
return DBuilder.createObjectPointerType(Ty);
}
-void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
- llvm::Value *Storage,
- CGBuilderTy &Builder,
- const CGBlockInfo &blockInfo) {
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
+ const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo, llvm::Instruction *InsertPoint) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
@@ -2880,40 +2945,42 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
const llvm::DataLayout &target = CGM.getDataLayout();
CharUnits offset = CharUnits::fromQuantity(
- target.getStructLayout(blockInfo.StructureType)
+ target.getStructLayout(blockInfo.StructureType)
->getElementOffset(blockInfo.getCapture(VD).getIndex()));
- SmallVector<llvm::Value *, 9> addr;
- llvm::Type *Int64Ty = CGM.Int64Ty;
+ SmallVector<int64_t, 9> addr;
if (isa<llvm::AllocaInst>(Storage))
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_plus);
+ addr.push_back(offset.getQuantity());
if (isByRef) {
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_plus);
// offset of __forwarding field
- offset = CGM.getContext()
- .toCharUnitsFromBits(target.getPointerSizeInBits(0));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ offset =
+ CGM.getContext().toCharUnitsFromBits(target.getPointerSizeInBits(0));
+ addr.push_back(offset.getQuantity());
+ addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_plus);
// offset of x field
offset = CGM.getContext().toCharUnitsFromBits(XOffset);
- addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(offset.getQuantity());
}
// Create the descriptor for the variable.
llvm::DIVariable D =
- DBuilder.createComplexVariable(llvm::dwarf::DW_TAG_auto_variable,
+ DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_auto_variable,
llvm::DIDescriptor(LexicalBlockStack.back()),
- VD->getName(), Unit, Line, Ty, addr);
+ VD->getName(), Unit, Line, Ty);
// Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *Call =
- DBuilder.insertDeclare(Storage, D, Builder.GetInsertPoint());
- Call->setDebugLoc(llvm::DebugLoc::get(Line, Column,
- LexicalBlockStack.back()));
+ llvm::Instruction *Call = InsertPoint ?
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr),
+ InsertPoint)
+ : DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr),
+ Builder.GetInsertBlock());
+ Call->setDebugLoc(
+ llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back()));
}
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
@@ -2926,17 +2993,18 @@ void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
}
namespace {
- struct BlockLayoutChunk {
- uint64_t OffsetInBits;
- const BlockDecl::Capture *Capture;
- };
- bool operator<(const BlockLayoutChunk &l, const BlockLayoutChunk &r) {
- return l.OffsetInBits < r.OffsetInBits;
- }
+struct BlockLayoutChunk {
+ uint64_t OffsetInBits;
+ const BlockDecl::Capture *Capture;
+};
+bool operator<(const BlockLayoutChunk &l, const BlockLayoutChunk &r) {
+ return l.OffsetInBits < r.OffsetInBits;
+}
}
void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::Value *Arg,
+ unsigned ArgNo,
llvm::Value *LocalAddr,
CGBuilderTy &Builder) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
@@ -2953,9 +3021,9 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
getContextDescriptor(cast<Decl>(blockDecl->getDeclContext()));
const llvm::StructLayout *blockLayout =
- CGM.getDataLayout().getStructLayout(block.StructureType);
+ CGM.getDataLayout().getStructLayout(block.StructureType);
- SmallVector<llvm::Value*, 16> fields;
+ SmallVector<llvm::Metadata *, 16> fields;
fields.push_back(createFieldType("__isa", C.VoidPtrTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(0),
tunit, tunit));
@@ -2965,16 +3033,16 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
fields.push_back(createFieldType("__reserved", C.IntTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(2),
tunit, tunit));
- fields.push_back(createFieldType("__FuncPtr", C.VoidPtrTy, 0, loc, AS_public,
+ auto *FnTy = block.getBlockExpr()->getFunctionType();
+ auto FnPtrType = CGM.getContext().getPointerType(FnTy->desugar());
+ fields.push_back(createFieldType("__FuncPtr", FnPtrType, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(3),
tunit, tunit));
- fields.push_back(createFieldType("__descriptor",
- C.getPointerType(block.NeedsCopyDispose ?
- C.getBlockDescriptorExtendedType() :
- C.getBlockDescriptorType()),
- 0, loc, AS_public,
- blockLayout->getElementOffsetInBits(4),
- tunit, tunit));
+ fields.push_back(createFieldType(
+ "__descriptor", C.getPointerType(block.NeedsCopyDispose
+ ? C.getBlockDescriptorExtendedType()
+ : C.getBlockDescriptorType()),
+ 0, loc, AS_public, blockLayout->getElementOffsetInBits(4), tunit, tunit));
// We want to sort the captures by offset, not because DWARF
// requires this, but because we're paranoid about debuggers.
@@ -2984,7 +3052,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
if (blockDecl->capturesCXXThis()) {
BlockLayoutChunk chunk;
chunk.OffsetInBits =
- blockLayout->getElementOffsetInBits(block.CXXThisIndex);
+ blockLayout->getElementOffsetInBits(block.CXXThisIndex);
chunk.Capture = nullptr;
chunks.push_back(chunk);
}
@@ -3000,7 +3068,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
BlockLayoutChunk chunk;
chunk.OffsetInBits =
- blockLayout->getElementOffsetInBits(captureInfo.getIndex());
+ blockLayout->getElementOffsetInBits(captureInfo.getIndex());
chunk.Capture = &capture;
chunks.push_back(chunk);
}
@@ -3008,15 +3076,16 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
// Sort by offset.
llvm::array_pod_sort(chunks.begin(), chunks.end());
- for (SmallVectorImpl<BlockLayoutChunk>::iterator
- i = chunks.begin(), e = chunks.end(); i != e; ++i) {
+ for (SmallVectorImpl<BlockLayoutChunk>::iterator i = chunks.begin(),
+ e = chunks.end();
+ i != e; ++i) {
uint64_t offsetInBits = i->OffsetInBits;
const BlockDecl::Capture *capture = i->Capture;
// If we have a null capture, this must be the C++ 'this' capture.
if (!capture) {
const CXXMethodDecl *method =
- cast<CXXMethodDecl>(blockDecl->getNonClosureContext());
+ cast<CXXMethodDecl>(blockDecl->getNonClosureContext());
QualType type = method->getThisType(C);
fields.push_back(createFieldType("this", type, 0, loc, AS_public,
@@ -3029,33 +3098,33 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::DIType fieldType;
if (capture->isByRef()) {
- std::pair<uint64_t,unsigned> ptrInfo = C.getTypeInfo(C.VoidPtrTy);
+ TypeInfo PtrInfo = C.getTypeInfo(C.VoidPtrTy);
// FIXME: this creates a second copy of this type!
uint64_t xoffset;
fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
- fieldType = DBuilder.createPointerType(fieldType, ptrInfo.first);
- fieldType = DBuilder.createMemberType(tunit, name, tunit, line,
- ptrInfo.first, ptrInfo.second,
- offsetInBits, 0, fieldType);
+ fieldType = DBuilder.createPointerType(fieldType, PtrInfo.Width);
+ fieldType =
+ DBuilder.createMemberType(tunit, name, tunit, line, PtrInfo.Width,
+ PtrInfo.Align, offsetInBits, 0, fieldType);
} else {
- fieldType = createFieldType(name, variable->getType(), 0,
- loc, AS_public, offsetInBits, tunit, tunit);
+ fieldType = createFieldType(name, variable->getType(), 0, loc, AS_public,
+ offsetInBits, tunit, tunit);
}
fields.push_back(fieldType);
}
SmallString<36> typeName;
- llvm::raw_svector_ostream(typeName)
- << "__block_literal_" << CGM.getUniqueBlockCount();
+ llvm::raw_svector_ostream(typeName) << "__block_literal_"
+ << CGM.getUniqueBlockCount();
llvm::DIArray fieldsArray = DBuilder.getOrCreateArray(fields);
llvm::DIType type =
- DBuilder.createStructType(tunit, typeName.str(), tunit, line,
- CGM.getContext().toBits(block.BlockSize),
- CGM.getContext().toBits(block.BlockAlign),
- 0, llvm::DIType(), fieldsArray);
+ DBuilder.createStructType(tunit, typeName.str(), tunit, line,
+ CGM.getContext().toBits(block.BlockSize),
+ CGM.getContext().toBits(block.BlockAlign), 0,
+ llvm::DIType(), fieldsArray);
type = DBuilder.createPointerType(type, CGM.PointerWidthInBits);
// Get overall information about the block.
@@ -3063,24 +3132,22 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::MDNode *scope = LexicalBlockStack.back();
// Create the descriptor for the parameter.
- llvm::DIVariable debugVar =
- DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_arg_variable,
- llvm::DIDescriptor(scope),
- Arg->getName(), tunit, line, type,
- CGM.getLangOpts().Optimize, flags,
- cast<llvm::Argument>(Arg)->getArgNo() + 1);
+ llvm::DIVariable debugVar = DBuilder.createLocalVariable(
+ llvm::dwarf::DW_TAG_arg_variable, llvm::DIDescriptor(scope),
+ Arg->getName(), tunit, line, type, CGM.getLangOpts().Optimize, flags,
+ ArgNo);
if (LocalAddr) {
// Insert an llvm.dbg.value into the current block.
- llvm::Instruction *DbgVal =
- DBuilder.insertDbgValueIntrinsic(LocalAddr, 0, debugVar,
- Builder.GetInsertBlock());
+ llvm::Instruction *DbgVal = DBuilder.insertDbgValueIntrinsic(
+ LocalAddr, 0, debugVar, DBuilder.createExpression(),
+ Builder.GetInsertBlock());
DbgVal->setDebugLoc(llvm::DebugLoc::get(line, column, scope));
}
// Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *DbgDecl =
- DBuilder.insertDeclare(Arg, debugVar, Builder.GetInsertBlock());
+ llvm::Instruction *DbgDecl = DBuilder.insertDeclare(
+ Arg, debugVar, DBuilder.createExpression(), Builder.GetInsertBlock());
DbgDecl->setDebugLoc(llvm::DebugLoc::get(line, column, scope));
}
@@ -3090,8 +3157,7 @@ llvm::DIDerivedType
CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
if (!D->isStaticDataMember())
return llvm::DIDerivedType();
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator MI =
- StaticDataMemberCache.find(D->getCanonicalDecl());
+ auto MI = StaticDataMemberCache.find(D->getCanonicalDecl());
if (MI != StaticDataMemberCache.end()) {
assert(MI->second && "Static data member declaration should still exist");
return llvm::DIDerivedType(cast<llvm::MDNode>(MI->second));
@@ -3099,10 +3165,9 @@ CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
// If the member wasn't found in the cache, lazily construct and add it to the
// type (used when a limited form of the type is emitted).
- llvm::DICompositeType Ctxt(
- getContextDescriptor(cast<Decl>(D->getDeclContext())));
- llvm::DIDerivedType T = CreateRecordStaticField(D, Ctxt);
- return T;
+ auto DC = D->getDeclContext();
+ llvm::DICompositeType Ctxt(getContextDescriptor(cast<Decl>(DC)));
+ return CreateRecordStaticField(D, Ctxt, cast<RecordDecl>(DC));
}
/// Recursively collect all of the member fields of a global anonymous decl and
@@ -3128,10 +3193,9 @@ CGDebugInfo::CollectAnonRecordDecls(const RecordDecl *RD, llvm::DIFile Unit,
continue;
}
// Use VarDecl's Tag, Scope and Line number.
- GV = DBuilder.createStaticVariable(DContext, FieldName, LinkageName, Unit,
- LineNo, FieldTy,
- Var->hasInternalLinkage(), Var,
- llvm::DIDerivedType());
+ GV = DBuilder.createGlobalVariable(
+ DContext, FieldName, LinkageName, Unit, LineNo, FieldTy,
+ Var->hasInternalLinkage(), Var, llvm::DIDerivedType());
}
return GV;
}
@@ -3141,32 +3205,12 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *D) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
- llvm::DIFile Unit = getOrCreateFile(D->getLocation());
- unsigned LineNo = getLineNumber(D->getLocation());
-
- setLocation(D->getLocation());
-
- QualType T = D->getType();
- if (T->isIncompleteArrayType()) {
-
- // CodeGen turns int[] into int[1] so we'll do the same here.
- llvm::APInt ConstVal(32, 1);
- QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
-
- T = CGM.getContext().getConstantArrayType(ET, ConstVal,
- ArrayType::Normal, 0);
- }
-
- StringRef DeclName = D->getName();
- StringRef LinkageName;
- if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext()) &&
- !isa<ObjCMethodDecl>(D->getDeclContext()))
- LinkageName = Var->getName();
- if (LinkageName == DeclName)
- LinkageName = StringRef();
-
- llvm::DIDescriptor DContext =
- getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()));
+ llvm::DIFile Unit;
+ llvm::DIDescriptor DContext;
+ unsigned LineNo;
+ StringRef DeclName, LinkageName;
+ QualType T;
+ collectVarDeclProps(D, Unit, LineNo, T, DeclName, LinkageName, DContext);
// Attempt to store one global variable for the declaration - even if we
// emit a lot of fields.
@@ -3177,15 +3221,16 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
// to find the name of any field in the union.
if (T->isUnionType() && DeclName.empty()) {
const RecordDecl *RD = cast<RecordType>(T)->getDecl();
- assert(RD->isAnonymousStructOrUnion() && "unnamed non-anonymous struct or union?");
+ assert(RD->isAnonymousStructOrUnion() &&
+ "unnamed non-anonymous struct or union?");
GV = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
} else {
- GV = DBuilder.createStaticVariable(
+ GV = DBuilder.createGlobalVariable(
DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
Var->hasInternalLinkage(), Var,
getOrCreateStaticDataMemberDeclarationOrNull(D));
}
- DeclCache.insert(std::make_pair(D->getCanonicalDecl(), llvm::WeakVH(GV)));
+ DeclCache[D->getCanonicalDecl()].reset(static_cast<llvm::Metadata *>(GV));
}
/// EmitGlobalVariable - Emit global variable's debug info.
@@ -3208,16 +3253,25 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
if (isa<FunctionDecl>(VD->getDeclContext()))
return;
VD = cast<ValueDecl>(VD->getCanonicalDecl());
- auto pair = DeclCache.insert(std::make_pair(VD, llvm::WeakVH()));
- if (!pair.second)
+ auto *VarD = cast<VarDecl>(VD);
+ if (VarD->isStaticDataMember()) {
+ auto *RD = cast<RecordDecl>(VarD->getDeclContext());
+ getContextDescriptor(RD);
+ // Ensure that the type is retained even though it's otherwise unreferenced.
+ RetainedTypes.push_back(
+ CGM.getContext().getRecordType(RD).getAsOpaquePtr());
return;
+ }
+
llvm::DIDescriptor DContext =
getContextDescriptor(dyn_cast<Decl>(VD->getDeclContext()));
- llvm::DIGlobalVariable GV = DBuilder.createStaticVariable(
+
+ auto &GV = DeclCache[VD];
+ if (GV)
+ return;
+ GV.reset(DBuilder.createGlobalVariable(
DContext, Name, StringRef(), Unit, getLineNumber(VD->getLocation()), Ty,
- true, Init,
- getOrCreateStaticDataMemberDeclarationOrNull(cast<VarDecl>(VD)));
- pair.first->second = llvm::WeakVH(GV);
+ true, Init, getOrCreateStaticDataMemberDeclarationOrNull(VarD)));
}
llvm::DIScope CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
@@ -3243,7 +3297,7 @@ void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) {
// Emitting one decl is sufficient - debuggers can detect that this is an
// overloaded name & provide lookup for all the overloads.
const UsingShadowDecl &USD = **UD.shadow_begin();
- if (llvm::DIScope Target =
+ if (llvm::DIDescriptor Target =
getDeclarationOrDefinition(USD.getUnderlyingDecl()))
DBuilder.createImportedDeclaration(
getCurrentContextDescriptor(cast<Decl>(USD.getDeclContext())), Target,
@@ -3254,7 +3308,7 @@ llvm::DIImportedEntity
CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) {
if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
return llvm::DIImportedEntity(nullptr);
- llvm::WeakVH &VH = NamespaceAliasCache[&NA];
+ auto &VH = NamespaceAliasCache[&NA];
if (VH)
return llvm::DIImportedEntity(cast<llvm::MDNode>(VH));
llvm::DIImportedEntity R(nullptr);
@@ -3270,7 +3324,7 @@ CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) {
getCurrentContextDescriptor(cast<Decl>(NA.getDeclContext())),
getOrCreateNameSpace(cast<NamespaceDecl>(NA.getAliasedNamespace())),
getLineNumber(NA.getLocation()), NA.getName());
- VH = R;
+ VH.reset(R);
return R;
}
@@ -3279,8 +3333,7 @@ CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) {
llvm::DINameSpace
CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
NSDecl = NSDecl->getCanonicalDecl();
- llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH>::iterator I =
- NameSpaceCache.find(NSDecl);
+ auto I = NameSpaceCache.find(NSDecl);
if (I != NameSpaceCache.end())
return llvm::DINameSpace(cast<llvm::MDNode>(I->second));
@@ -3290,7 +3343,7 @@ CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()));
llvm::DINameSpace NS =
DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo);
- NameSpaceCache[NSDecl] = llvm::WeakVH(NS);
+ NameSpaceCache[NSDecl].reset(NS);
return NS;
}
@@ -3318,6 +3371,24 @@ void CGDebugInfo::finalize() {
Ty.replaceAllUsesWith(CGM.getLLVMContext(), RepTy);
}
+ for (const auto &p : FwdDeclReplaceMap) {
+ assert(p.second);
+ llvm::DIDescriptor FwdDecl(cast<llvm::MDNode>(p.second));
+ llvm::Metadata *Repl;
+
+ auto it = DeclCache.find(p.first);
+ // If there has been no definition for the declaration, call RAUW
+ // with ourselves, that will destroy the temporary MDNode and
+ // replace it with a standard one, avoiding leaking memory.
+ if (it == DeclCache.end())
+ Repl = p.second;
+ else
+ Repl = it->second;
+
+ FwdDecl.replaceAllUsesWith(CGM.getLLVMContext(),
+ llvm::DIDescriptor(cast<llvm::MDNode>(Repl)));
+ }
+
// We keep our own list of retained types, because we need to look
// up the final type in the type cache.
for (std::vector<void *>::const_iterator RI = RetainedTypes.begin(),
@@ -3326,3 +3397,11 @@ void CGDebugInfo::finalize() {
DBuilder.finalize();
}
+
+void CGDebugInfo::EmitExplicitCastType(QualType Ty) {
+ if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
+ return;
+ llvm::DIType DieTy = getOrCreateType(Ty, getOrCreateMainFile());
+ // Don't ignore in case of explicit cast where it is referenced indirectly.
+ DBuilder.retainType(DieTy);
+}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index fc3f434991fa..0be032c1d790 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGDEBUGINFO_H
-#define CLANG_CODEGEN_CGDEBUGINFO_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
#include "CGBuilder.h"
#include "clang/AST/Expr.h"
@@ -53,7 +53,7 @@ class CGDebugInfo {
const CodeGenOptions::DebugInfoKind DebugKind;
llvm::DIBuilder DBuilder;
llvm::DICompileUnit TheCU;
- SourceLocation CurLoc, PrevLoc;
+ SourceLocation CurLoc;
llvm::DIType VTablePtrType;
llvm::DIType ClassTy;
llvm::DICompositeType ObjTy;
@@ -65,7 +65,7 @@ class CGDebugInfo {
llvm::DIType BlockLiteralGeneric;
/// TypeCache - Cache of previously constructed Types.
- llvm::DenseMap<const void *, llvm::WeakVH> TypeCache;
+ llvm::DenseMap<const void *, llvm::TrackingMDRef> TypeCache;
struct ObjCInterfaceCacheEntry {
const ObjCInterfaceType *Type;
@@ -85,11 +85,16 @@ class CGDebugInfo {
/// ReplaceMap - Cache of forward declared types to RAUW at the end of
/// compilation.
- std::vector<std::pair<const TagType *, llvm::WeakVH>> ReplaceMap;
+ std::vector<std::pair<const TagType *, llvm::TrackingMDRef>> ReplaceMap;
+
+ /// \brief Cache of replaceable forward declarations (functions and
+ /// variables) to RAUW at the end of compilation.
+ std::vector<std::pair<const DeclaratorDecl *, llvm::TrackingMDRef>>
+ FwdDeclReplaceMap;
// LexicalBlockStack - Keep track of our current nested lexical block.
- std::vector<llvm::TrackingVH<llvm::MDNode> > LexicalBlockStack;
- llvm::DenseMap<const Decl *, llvm::WeakVH> RegionMap;
+ std::vector<llvm::TrackingMDNodeRef> LexicalBlockStack;
+ llvm::DenseMap<const Decl *, llvm::TrackingMDRef> RegionMap;
// FnBeginRegionCount - Keep track of LexicalBlockStack counter at the
// beginning of a function. This is used to pop unbalanced regions at
// the end of a function.
@@ -100,14 +105,15 @@ class CGDebugInfo {
llvm::BumpPtrAllocator DebugInfoNames;
StringRef CWDName;
- llvm::DenseMap<const char *, llvm::WeakVH> DIFileCache;
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
+ llvm::DenseMap<const char *, llvm::TrackingMDRef> DIFileCache;
+ llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> SPCache;
/// \brief Cache declarations relevant to DW_TAG_imported_declarations (C++
/// using declarations) that aren't covered by other more specific caches.
- llvm::DenseMap<const Decl *, llvm::WeakVH> DeclCache;
- llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache;
- llvm::DenseMap<const NamespaceAliasDecl *, llvm::WeakVH> NamespaceAliasCache;
- llvm::DenseMap<const Decl *, llvm::WeakVH> StaticDataMemberCache;
+ llvm::DenseMap<const Decl *, llvm::TrackingMDRef> DeclCache;
+ llvm::DenseMap<const NamespaceDecl *, llvm::TrackingMDRef> NameSpaceCache;
+ llvm::DenseMap<const NamespaceAliasDecl *, llvm::TrackingMDRef>
+ NamespaceAliasCache;
+ llvm::DenseMap<const Decl *, llvm::TrackingMDRef> StaticDataMemberCache;
/// Helper functions for getOrCreateType.
unsigned Checksum(const ObjCInterfaceDecl *InterfaceDecl);
@@ -158,14 +164,12 @@ class CGDebugInfo {
llvm::DIFile F,
llvm::DIType RecordTy);
- void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
- llvm::DIFile F,
- SmallVectorImpl<llvm::Value *> &E,
+ void CollectCXXMemberFunctions(const CXXRecordDecl *Decl, llvm::DIFile F,
+ SmallVectorImpl<llvm::Metadata *> &E,
llvm::DIType T);
- void CollectCXXBases(const CXXRecordDecl *Decl,
- llvm::DIFile F,
- SmallVectorImpl<llvm::Value *> &EltTys,
+ void CollectCXXBases(const CXXRecordDecl *Decl, llvm::DIFile F,
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
llvm::DIType RecordTy);
llvm::DIArray
@@ -180,27 +184,29 @@ class CGDebugInfo {
llvm::DIType createFieldType(StringRef name, QualType type,
uint64_t sizeInBitsOverride, SourceLocation loc,
- AccessSpecifier AS, uint64_t offsetInBits,
+ AccessSpecifier AS,
+ uint64_t offsetInBits,
llvm::DIFile tunit,
- llvm::DIScope scope);
+ llvm::DIScope scope,
+ const RecordDecl* RD = nullptr);
// Helpers for collecting fields of a record.
void CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
- SmallVectorImpl<llvm::Value *> &E,
+ SmallVectorImpl<llvm::Metadata *> &E,
llvm::DIType RecordTy);
llvm::DIDerivedType CreateRecordStaticField(const VarDecl *Var,
- llvm::DIType RecordTy);
+ llvm::DIType RecordTy,
+ const RecordDecl* RD);
void CollectRecordNormalField(const FieldDecl *Field, uint64_t OffsetInBits,
llvm::DIFile F,
- SmallVectorImpl<llvm::Value *> &E,
- llvm::DIType RecordTy);
+ SmallVectorImpl<llvm::Metadata *> &E,
+ llvm::DIType RecordTy, const RecordDecl *RD);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
- SmallVectorImpl<llvm::Value *> &E,
+ SmallVectorImpl<llvm::Metadata *> &E,
llvm::DICompositeType RecordTy);
- void CollectVTableInfo(const CXXRecordDecl *Decl,
- llvm::DIFile F,
- SmallVectorImpl<llvm::Value *> &EltTys);
+ void CollectVTableInfo(const CXXRecordDecl *Decl, llvm::DIFile F,
+ SmallVectorImpl<llvm::Metadata *> &EltTys);
// CreateLexicalBlock - Create a new lexical block node and push it on
// the stack.
@@ -255,7 +261,8 @@ public:
void EmitDeclareOfBlockDeclRefVariable(const VarDecl *variable,
llvm::Value *storage,
CGBuilderTy &Builder,
- const CGBlockInfo &blockInfo);
+ const CGBlockInfo &blockInfo,
+ llvm::Instruction *InsertPoint = 0);
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
/// variable declaration.
@@ -266,7 +273,7 @@ public:
/// llvm.dbg.declare for the block-literal argument to a block
/// invocation function.
void EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
- llvm::Value *Arg,
+ llvm::Value *Arg, unsigned ArgNo,
llvm::Value *LocalAddr,
CGBuilderTy &Builder);
@@ -279,6 +286,9 @@ public:
/// \brief - Emit C++ using directive.
void EmitUsingDirective(const UsingDirectiveDecl &UD);
+ /// EmitExplicitCastType - Emit the type explicitly cast to.
+ void EmitExplicitCastType(QualType Ty);
+
/// \brief - Emit C++ using declaration.
void EmitUsingDecl(const UsingDecl &UD);
@@ -356,9 +366,9 @@ private:
llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
StringRef Name, uint64_t *Offset);
- /// \brief Retrieve the DIScope, if any, for the canonical form of this
+ /// \brief Retrieve the DIDescriptor, if any, for the canonical form of this
/// declaration.
- llvm::DIScope getDeclarationOrDefinition(const Decl *D);
+ llvm::DIDescriptor getDeclarationOrDefinition(const Decl *D);
/// getFunctionDeclaration - Return debug info descriptor to describe method
/// declaration for the given method definition.
@@ -369,6 +379,14 @@ private:
llvm::DIDerivedType
getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D);
+ /// \brief Create a DISubprogram describing the forward
+ /// declaration represented in the given FunctionDecl.
+ llvm::DISubprogram getFunctionForwardDeclaration(const FunctionDecl *FD);
+
+ /// \brief Create a DIGlobalVariable describing the forward
+ /// declaration represented in the given VarDecl.
+ llvm::DIGlobalVariable getGlobalVariableForwardDeclaration(const VarDecl *VD);
+
/// Return a global variable that represents one of the collection of
/// global variables created for an anonymous union.
llvm::DIGlobalVariable
@@ -404,6 +422,21 @@ private:
/// \param Force Assume DebugColumnInfo option is true.
unsigned getColumnNumber(SourceLocation Loc, bool Force=false);
+ /// \brief Collect various properties of a FunctionDecl.
+ /// \param GD A GlobalDecl whose getDecl() must return a FunctionDecl.
+ void collectFunctionDeclProps(GlobalDecl GD,
+ llvm::DIFile Unit,
+ StringRef &Name, StringRef &LinkageName,
+ llvm::DIDescriptor &FDContext,
+ llvm::DIArray &TParamsArray,
+ unsigned &Flags);
+
+ /// \brief Collect various properties of a VarDecl.
+ void collectVarDeclProps(const VarDecl *VD, llvm::DIFile &Unit,
+ unsigned &LineNo, QualType &T,
+ StringRef &Name, StringRef &LinkageName,
+ llvm::DIDescriptor &VDContext);
+
/// internString - Allocate a copy of \p A using the DebugInfoNames allocator
/// and return a reference to it. If multiple arguments are given the strings
/// are concatenated.
@@ -415,27 +448,17 @@ private:
}
};
-/// SaveAndRestoreLocation - An RAII object saves the current location
-/// and automatically restores it to the original value.
-class SaveAndRestoreLocation {
+class ApplyDebugLocation {
protected:
- SourceLocation SavedLoc;
- CGDebugInfo *DI;
- CGBuilderTy &Builder;
-public:
- SaveAndRestoreLocation(CodeGenFunction &CGF, CGBuilderTy &B);
- /// Autorestore everything back to normal.
- ~SaveAndRestoreLocation();
-};
+ llvm::DebugLoc OriginalLocation;
+ CodeGenFunction &CGF;
-/// NoLocation - An RAII object that temporarily disables debug
-/// locations. This is useful for emitting instructions that should be
-/// counted towards the function prologue.
-class NoLocation : public SaveAndRestoreLocation {
public:
- NoLocation(CodeGenFunction &CGF, CGBuilderTy &B);
- /// Autorestore everything back to normal.
- ~NoLocation();
+ ApplyDebugLocation(CodeGenFunction &CGF,
+ SourceLocation TemporaryLocation = SourceLocation(),
+ bool ForceColumnInfo = false);
+ ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc);
+ ~ApplyDebugLocation();
};
/// ArtificialLocation - An RAII object that temporarily switches to
@@ -449,16 +472,9 @@ public:
/// This is necessary because passing an empty SourceLocation to
/// CGDebugInfo::setLocation() will result in the last valid location
/// being reused.
-class ArtificialLocation : public SaveAndRestoreLocation {
+class ArtificialLocation : public ApplyDebugLocation {
public:
- ArtificialLocation(CodeGenFunction &CGF, CGBuilderTy &B);
-
- /// Set the current location to line 0, but within the current scope
- /// (= the top of the LexicalBlockStack).
- void Emit();
-
- /// Autorestore everything back to normal.
- ~ArtificialLocation();
+ ArtificialLocation(CodeGenFunction &CGF);
};
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 91f804193049..766d2aa6ffb8 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -146,60 +146,71 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
return EmitAutoVarDecl(D);
}
-static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
- const char *Separator) {
- CodeGenModule &CGM = CGF.CGM;
-
- if (CGF.getLangOpts().CPlusPlus)
+static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
+ if (CGM.getLangOpts().CPlusPlus)
return CGM.getMangledName(&D).str();
- StringRef ContextName;
- if (!CGF.CurFuncDecl) {
- // Better be in a block declared in global scope.
- const NamedDecl *ND = cast<NamedDecl>(&D);
- const DeclContext *DC = ND->getDeclContext();
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC))
- ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
- else
- llvm_unreachable("Unknown context for block static var decl");
- } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl))
+ // If this isn't C++, we don't need a mangled name, just a pretty one.
+ assert(!D.isExternallyVisible() && "name shouldn't matter");
+ std::string ContextName;
+ const DeclContext *DC = D.getDeclContext();
+ if (const auto *FD = dyn_cast<FunctionDecl>(DC))
ContextName = CGM.getMangledName(FD);
- else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
- ContextName = CGF.CurFn->getName();
+ else if (const auto *BD = dyn_cast<BlockDecl>(DC))
+ ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
+ else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
+ ContextName = OMD->getSelector().getAsString();
else
llvm_unreachable("Unknown context for static var decl");
- return ContextName.str() + Separator + D.getNameAsString();
+ ContextName += "." + D.getNameAsString();
+ return ContextName;
}
-llvm::Constant *
-CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
- const char *Separator,
- llvm::GlobalValue::LinkageTypes Linkage) {
+llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
+ const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
+ // In general, we don't always emit static var decls once before we reference
+ // them. It is possible to reference them before emitting the function that
+ // contains them, and it is possible to emit the containing function multiple
+ // times.
+ if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
+ return ExistingGV;
+
QualType Ty = D.getType();
assert(Ty->isConstantSizeType() && "VLAs can't be static");
// Use the label if the variable is renamed with the asm-label extension.
std::string Name;
if (D.hasAttr<AsmLabelAttr>())
- Name = CGM.getMangledName(&D);
+ Name = getMangledName(&D);
else
- Name = GetStaticDeclName(*this, D, Separator);
+ Name = getStaticDeclName(*this, D);
- llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
unsigned AddrSpace =
- CGM.GetGlobalVarAddressSpace(&D, CGM.getContext().getTargetAddressSpace(Ty));
+ GetGlobalVarAddressSpace(&D, getContext().getTargetAddressSpace(Ty));
+
+ // Local address space cannot have an initializer.
+ llvm::Constant *Init = nullptr;
+ if (Ty.getAddressSpace() != LangAS::opencl_local)
+ Init = EmitNullConstant(Ty);
+ else
+ Init = llvm::UndefValue::get(LTy);
+
llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), LTy,
+ new llvm::GlobalVariable(getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
- CGM.EmitNullConstant(D.getType()), Name, nullptr,
+ Init, Name, nullptr,
llvm::GlobalVariable::NotThreadLocal,
AddrSpace);
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
- CGM.setGlobalVisibility(GV, &D);
+ setGlobalVisibility(GV, &D);
+
+ if (supportsCOMDAT() && GV->isWeakForLinker())
+ GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
if (D.getTLSKind())
- CGM.setTLSMode(GV, D);
+ setTLSMode(GV, D);
if (D.isExternallyVisible()) {
if (D.hasAttr<DLLImportAttr>())
@@ -209,13 +220,44 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
}
// Make sure the result is of the correct type.
- unsigned ExpectedAddrSpace = CGM.getContext().getTargetAddressSpace(Ty);
+ unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(Ty);
+ llvm::Constant *Addr = GV;
if (AddrSpace != ExpectedAddrSpace) {
llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
- return llvm::ConstantExpr::getAddrSpaceCast(GV, PTy);
+ Addr = llvm::ConstantExpr::getAddrSpaceCast(GV, PTy);
}
- return GV;
+ setStaticLocalDeclAddress(&D, Addr);
+
+ // Ensure that the static local gets initialized by making sure the parent
+ // function gets emitted eventually.
+ const Decl *DC = cast<Decl>(D.getDeclContext());
+
+ // We can't name blocks or captured statements directly, so try to emit their
+ // parents.
+ if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
+ DC = DC->getNonClosureContext();
+ // FIXME: Ensure that global blocks get emitted.
+ if (!DC)
+ return Addr;
+ }
+
+ GlobalDecl GD;
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
+ GD = GlobalDecl(CD, Ctor_Base);
+ else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
+ GD = GlobalDecl(DD, Dtor_Base);
+ else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
+ GD = GlobalDecl(FD);
+ else {
+ // Don't do anything for Obj-C method decls or global closures. We should
+ // never defer them.
+ assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
+ }
+ if (GD.getDecl())
+ (void)GetAddrOfGlobal(GD);
+
+ return Addr;
}
/// hasNontrivialDestruction - Determine whether a type's destruction is
@@ -298,16 +340,11 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// Check to see if we already have a global variable for this
// declaration. This can happen when double-emitting function
// bodies, e.g. with complete and base constructors.
- llvm::Constant *addr =
- CGM.getStaticLocalDeclAddress(&D);
-
- if (!addr)
- addr = CreateStaticVarDecl(D, ".", Linkage);
+ llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
// Store into LocalDeclMap before generating initializer to handle
// circular references.
DMEntry = addr;
- CGM.setStaticLocalDeclAddress(&D, addr);
// We can't have a VLA here, but we can have a pointer to a VLA,
// even though that doesn't really make any sense.
@@ -345,7 +382,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
DMEntry = castedAddr;
CGM.setStaticLocalDeclAddress(&D, castedAddr);
- CGM.reportGlobalToASan(var, D);
+ CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
@@ -562,10 +599,8 @@ static void drillIntoBlockVariable(CodeGenFunction &CGF,
lvalue.setAddress(CGF.BuildBlockByrefAddress(lvalue.getAddress(), var));
}
-void CodeGenFunction::EmitScalarInit(const Expr *init,
- const ValueDecl *D,
- LValue lvalue,
- bool capturedByInit) {
+void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit) {
Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
if (!lifetime) {
llvm::Value *value = EmitScalarExpr(init);
@@ -1035,7 +1070,7 @@ static bool isCapturedBy(const VarDecl &var, const Expr *e) {
/// \brief Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
-static bool isTrivialInitializer(const Expr *Init) {
+bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
if (!Init)
return true;
@@ -1055,6 +1090,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
if (emission.wasEmittedAsGlobal()) return;
const VarDecl &D = *emission.Variable;
+ ApplyDebugLocation DL(*this, D.getLocation());
QualType type = D.getType();
// If this local has an initializer, emit it now.
@@ -1129,7 +1165,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
} else {
// Otherwise, create a temporary global with the initializer then
// memcpy from the global to the alloca.
- std::string Name = GetStaticDeclName(*this, D, ".");
+ std::string Name = getStaticDeclName(CGM, D);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
llvm::GlobalValue::PrivateLinkage,
@@ -1158,10 +1194,8 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
/// \param alignment the alignment of the address
/// \param capturedByInit true if the variable is a __block variable
/// whose address is potentially changed by the initializer
-void CodeGenFunction::EmitExprAsInit(const Expr *init,
- const ValueDecl *D,
- LValue lvalue,
- bool capturedByInit) {
+void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit) {
QualType type = D->getType();
if (type->isReferenceType()) {
@@ -1636,7 +1670,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
if (CGM.getCodeGenOpts().getDebugInfo()
>= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
- DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, LocalAddr, Builder);
+ DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, ArgNo,
+ LocalAddr, Builder);
}
}
@@ -1656,7 +1691,9 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
DeclPtr = Arg->getType() == IRTy ? Arg : Builder.CreateBitCast(Arg, IRTy,
D.getName());
// Push a destructor cleanup for this parameter if the ABI requires it.
- if (!IsScalar &&
+ // Don't push a cleanup in a thunk for a method that will also emit a
+ // cleanup.
+ if (!IsScalar && !CurFuncIsThunk &&
getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
if (RD && RD->hasNonTrivialDestructor())
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 94cfe211601f..3b379b7d258b 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -14,6 +14,7 @@
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
+#include "CGOpenMPRuntime.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
@@ -96,7 +97,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
assert(!record->hasTrivialDestructor());
CXXDestructorDecl *dtor = record->getDestructor();
- function = CGM.GetAddrOfCXXDestructor(dtor, Dtor_Complete);
+ function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
argument = llvm::ConstantExpr::getBitCast(
addr, CGF.getTypes().ConvertType(type)->getPointerTo());
@@ -139,6 +140,10 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
QualType T = D.getType();
if (!T->isReferenceType()) {
+ if (getLangOpts().OpenMP && D.hasAttr<OMPThreadPrivateDeclAttr>())
+ (void)CGM.getOpenMPRuntime().EmitOMPThreadPrivateVarDefinition(
+ &D, DeclPtr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
+ PerformInit, this);
if (PerformInit)
EmitDeclInit(*this, D, DeclPtr);
if (CGM.isTypeConstant(D.getType(), true))
@@ -155,17 +160,11 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}
-static llvm::Function *
-CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
- llvm::FunctionType *ty,
- const Twine &name,
- bool TLS = false);
-
/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
-static llvm::Constant *createAtExitStub(CodeGenModule &CGM, const VarDecl &VD,
- llvm::Constant *dtor,
- llvm::Constant *addr) {
+llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
// Get the destructor function type, void(*)(void).
llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
SmallString<256> FnName;
@@ -173,8 +172,8 @@ static llvm::Constant *createAtExitStub(CodeGenModule &CGM, const VarDecl &VD,
llvm::raw_svector_ostream Out(FnName);
CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
}
- llvm::Function *fn =
- CreateGlobalInitOrDestructFunction(CGM, ty, FnName.str());
+ llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(ty, FnName.str(),
+ VD.getLocation());
CodeGenFunction CGF(CGM);
@@ -198,7 +197,7 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
llvm::Constant *dtor,
llvm::Constant *addr) {
// Create a function which calls the destructor.
- llvm::Constant *dtorStub = createAtExitStub(CGM, VD, dtor, addr);
+ llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
// extern "C" int atexit(void (*f)(void));
llvm::FunctionType *atexitTy =
@@ -226,31 +225,28 @@ void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}
-static llvm::Function *
-CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
- llvm::FunctionType *FTy,
- const Twine &Name, bool TLS) {
+llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
+ llvm::FunctionType *FTy, const Twine &Name, SourceLocation Loc, bool TLS) {
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- Name, &CGM.getModule());
- if (!CGM.getLangOpts().AppleKext && !TLS) {
+ Name, &getModule());
+ if (!getLangOpts().AppleKext && !TLS) {
// Set the section if needed.
- if (const char *Section =
- CGM.getTarget().getStaticInitSectionSpecifier())
+ if (const char *Section = getTarget().getStaticInitSectionSpecifier())
Fn->setSection(Section);
}
- Fn->setCallingConv(CGM.getRuntimeCC());
+ Fn->setCallingConv(getRuntimeCC());
- if (!CGM.getLangOpts().Exceptions)
+ if (!getLangOpts().Exceptions)
Fn->setDoesNotThrow();
- if (!CGM.getSanitizerBlacklist().isIn(*Fn)) {
- if (CGM.getLangOpts().Sanitize.Address)
+ if (!isInSanitizerBlacklist(Fn, Loc)) {
+ if (getLangOpts().Sanitize.has(SanitizerKind::Address))
Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
- if (CGM.getLangOpts().Sanitize.Thread)
+ if (getLangOpts().Sanitize.has(SanitizerKind::Thread))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
- if (CGM.getLangOpts().Sanitize.Memory)
+ if (getLangOpts().Sanitize.has(SanitizerKind::Memory))
Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
}
@@ -271,15 +267,7 @@ void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
addUsedGlobal(PtrArray);
// If the GV is already in a comdat group, then we have to join it.
- llvm::Comdat *C = GV->getComdat();
-
- // LinkOnce and Weak linkage are lowered down to a single-member comdat group.
- // Make an explicit group so we can join it.
- if (!C && (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage())) {
- C = TheModule.getOrInsertComdat(GV->getName());
- GV->setComdat(C);
- }
- if (C)
+ if (llvm::Comdat *C = GV->getComdat())
PtrArray->setComdat(C);
}
@@ -296,11 +284,15 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
// Create a variable initialization function.
llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(*this, FTy, FnName.str());
+ CreateGlobalInitOrDestructFunction(FTy, FnName.str(), D->getLocation());
auto *ISA = D->getAttr<InitSegAttr>();
CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
PerformInit);
+
+ llvm::GlobalVariable *COMDATKey =
+ supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;
+
if (D->getTLSKind()) {
// FIXME: Should we support init_priority for thread_local?
// FIXME: Ideally, initialization of instantiated thread_local static data
@@ -309,6 +301,7 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
// FIXME: We only need to register one __cxa_thread_atexit function for the
// entire TU.
CXXThreadLocalInits.push_back(Fn);
+ CXXThreadLocalInitVars.push_back(Addr);
} else if (PerformInit && ISA) {
EmitPointerToInitFunc(D, Addr, Fn, ISA);
DelayedCXXInitPosition.erase(D);
@@ -316,8 +309,7 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
DelayedCXXInitPosition.erase(D);
- } else if (D->getTemplateSpecializationKind() != TSK_ExplicitSpecialization &&
- D->getTemplateSpecializationKind() != TSK_Undeclared) {
+ } else if (isTemplateInstantiation(D->getTemplateSpecializationKind())) {
// C++ [basic.start.init]p2:
// Definitions of explicitly specialized class template static data
// members have ordered initialization. Other class template static data
@@ -326,11 +318,17 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
//
// As a consequence, we can put them into their own llvm.global_ctors entry.
//
- // In addition, put the initializer into a COMDAT group with the global
- // being initialized. On most platforms, this is a minor startup time
- // optimization. In the MS C++ ABI, there are no guard variables, so this
- // COMDAT key is required for correctness.
- AddGlobalCtor(Fn, 65535, Addr);
+ // If the global is externally visible, put the initializer into a COMDAT
+ // group with the global being initialized. On most platforms, this is a
+ // minor startup time optimization. In the MS C++ ABI, there are no guard
+ // variables, so this COMDAT key is required for correctness.
+ AddGlobalCtor(Fn, 65535, COMDATKey);
+ DelayedCXXInitPosition.erase(D);
+ } else if (D->hasAttr<SelectAnyAttr>()) {
+ // SelectAny globals will be comdat-folded. Put the initializer into a
+ // COMDAT group associated with the global, so the initializers get folded
+ // too.
+ AddGlobalCtor(Fn, 65535, COMDATKey);
DelayedCXXInitPosition.erase(D);
} else {
llvm::DenseMap<const Decl *, unsigned>::iterator I =
@@ -346,23 +344,11 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
}
void CodeGenModule::EmitCXXThreadLocalInitFunc() {
- llvm::Function *InitFn = nullptr;
- if (!CXXThreadLocalInits.empty()) {
- // Generate a guarded initialization function.
- llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
- InitFn = CreateGlobalInitOrDestructFunction(*this, FTy, "__tls_init",
- /*TLS*/ true);
- llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
- getModule(), Int8Ty, false, llvm::GlobalVariable::InternalLinkage,
- llvm::ConstantInt::get(Int8Ty, 0), "__tls_guard");
- Guard->setThreadLocal(true);
- CodeGenFunction(*this)
- .GenerateCXXGlobalInitFunc(InitFn, CXXThreadLocalInits, Guard);
- }
-
- getCXXABI().EmitThreadLocalInitFuncs(CXXThreadLocals, InitFn);
+ getCXXABI().EmitThreadLocalInitFuncs(
+ *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);
CXXThreadLocalInits.clear();
+ CXXThreadLocalInitVars.clear();
CXXThreadLocals.clear();
}
@@ -379,7 +365,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
// Create our global initialization function.
if (!PrioritizedCXXGlobalInits.empty()) {
- SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits;
+ SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
PrioritizedCXXGlobalInits.end());
// Iterate over "chunks" of ctors with same priority and emit each chunk
@@ -398,10 +384,9 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
std::string PrioritySuffix = llvm::utostr(Priority);
// Priority is always <= 65535 (enforced by sema).
PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
- llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(*this, FTy,
- "_GLOBAL__I_" + PrioritySuffix);
-
+ llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
+ FTy, "_GLOBAL__I_" + PrioritySuffix);
+
for (; I < PrioE; ++I)
LocalCXXGlobalInits.push_back(I->second);
@@ -409,21 +394,27 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
AddGlobalCtor(Fn, Priority);
}
}
-
- // Include the filename in the symbol name. Including "sub_" matches gcc and
- // makes sure these symbols appear lexicographically behind the symbols with
- // priority emitted above.
+
+ SmallString<128> FileName;
SourceManager &SM = Context.getSourceManager();
- SmallString<128> FileName(llvm::sys::path::filename(
- SM.getFileEntryForID(SM.getMainFileID())->getName()));
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ // Include the filename in the symbol name. Including "sub_" matches gcc and
+ // makes sure these symbols appear lexicographically behind the symbols with
+ // priority emitted above.
+ FileName = llvm::sys::path::filename(MainFile->getName());
+ } else {
+ FileName = SmallString<128>("<null>");
+ }
+
for (size_t i = 0; i < FileName.size(); ++i) {
// Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
// to be the set of C preprocessing numbers.
if (!isPreprocessingNumberBody(FileName[i]))
FileName[i] = '_';
}
+
llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
- *this, FTy, llvm::Twine("_GLOBAL__sub_I_", FileName));
+ FTy, llvm::Twine("_GLOBAL__sub_I_", FileName));
CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
AddGlobalCtor(Fn);
@@ -439,8 +430,7 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
// Create our global destructor function.
- llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__D_a");
+ llvm::Function *Fn = CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a");
CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
AddGlobalDtor(Fn);
@@ -455,6 +445,8 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
if (D->hasAttr<NoDebugAttr>())
DebugInfo = nullptr; // disable debug info indefinitely for this function
+ CurEHLocation = D->getLocStart();
+
StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
FunctionArgList(), D->getLocation(),
@@ -474,14 +466,14 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
- ArrayRef<llvm::Constant *> Decls,
+ ArrayRef<llvm::Function *> Decls,
llvm::GlobalVariable *Guard) {
{
- ArtificialLocation AL(*this, Builder);
+ ApplyDebugLocation NL(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(), FunctionArgList());
// Emit an artificial location for this function.
- AL.Emit();
+ ArtificialLocation AL(*this);
llvm::BasicBlock *ExitBlock = nullptr;
if (Guard) {
@@ -528,11 +520,11 @@ void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
&DtorsAndObjects) {
{
- ArtificialLocation AL(*this, Builder);
+ ApplyDebugLocation NL(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(), FunctionArgList());
// Emit an artificial location for this function.
- AL.Emit();
+ ArtificialLocation AL(*this);
// Emit the dtors, in reverse order from construction.
for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
@@ -561,8 +553,10 @@ llvm::Function *CodeGenFunction::generateDestroyHelper(
const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
getContext().VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *fn =
- CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
+ llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, "__cxx_global_array_dtor", VD->getLocation());
+
+ CurEHLocation = VD->getLocStart();
StartFunction(VD, getContext().VoidTy, fn, FI, args);
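As a source-level reminder of the constructs the CGDeclCXX.cpp hunks above deal with (illustrative sketch, not part of the patch): a thread_local with a dynamic initializer now records both the init function and the variable (CXXThreadLocalInits / CXXThreadLocalInitVars) for the C++ ABI to wire up, and the unordered initializer of an instantiated class-template static data member is the case that can now share a COMDAT key with the variable itself.

struct S {
  S();                               // dynamic initialization
  ~S();
};

thread_local S tls_obj;              // initialized through the generated TLS init machinery

template <typename T>
struct Holder {
  static S member;                   // unordered initialization once instantiated
};
template <typename T> S Holder<T>::member;

template struct Holder<int>;         // externally visible instantiation: its initializer can be
                                     // keyed to a COMDAT with the variable on targets that support it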
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 1bbda5cbf09c..cb8eb8fa490c 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
@@ -52,15 +53,6 @@ static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}
-static llvm::Constant *getReThrowFn(CodeGenModule &CGM) {
- // void __cxa_rethrow();
-
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
-
- return CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
-}
-
static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
// void *__cxa_get_exception_ptr(void*);
@@ -134,15 +126,17 @@ namespace {
// This function must have prototype void(void*).
const char *CatchallRethrowFn;
- static const EHPersonality &get(const LangOptions &Lang);
+ static const EHPersonality &get(CodeGenModule &CGM);
static const EHPersonality GNU_C;
static const EHPersonality GNU_C_SJLJ;
+ static const EHPersonality GNU_C_SEH;
static const EHPersonality GNU_ObjC;
static const EHPersonality GNUstep_ObjC;
static const EHPersonality GNU_ObjCXX;
static const EHPersonality NeXT_ObjC;
static const EHPersonality GNU_CPlusPlus;
static const EHPersonality GNU_CPlusPlus_SJLJ;
+ static const EHPersonality GNU_CPlusPlus_SEH;
};
}
@@ -150,28 +144,42 @@ const EHPersonality EHPersonality::GNU_C = { "__gcc_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_C_SJLJ = { "__gcc_personality_sj0", nullptr };
const EHPersonality
+EHPersonality::GNU_C_SEH = { "__gcc_personality_seh0", nullptr };
+const EHPersonality
EHPersonality::NeXT_ObjC = { "__objc_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_CPlusPlus = { "__gxx_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_CPlusPlus_SJLJ = { "__gxx_personality_sj0", nullptr };
const EHPersonality
+EHPersonality::GNU_CPlusPlus_SEH = { "__gxx_personality_seh0", nullptr };
+const EHPersonality
EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0", "objc_exception_throw"};
const EHPersonality
EHPersonality::GNU_ObjCXX = { "__gnustep_objcxx_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNUstep_ObjC = { "__gnustep_objc_personality_v0", nullptr };
-static const EHPersonality &getCPersonality(const LangOptions &L) {
+/// On Win64, use libgcc's SEH personality function. We fall back to dwarf on
+/// other platforms, unless the user asked for SjLj exceptions.
+static bool useLibGCCSEHPersonality(const llvm::Triple &T) {
+ return T.isOSWindows() && T.getArch() == llvm::Triple::x86_64;
+}
+
+static const EHPersonality &getCPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
if (L.SjLjExceptions)
return EHPersonality::GNU_C_SJLJ;
+ else if (useLibGCCSEHPersonality(T))
+ return EHPersonality::GNU_C_SEH;
return EHPersonality::GNU_C;
}
-static const EHPersonality &getObjCPersonality(const LangOptions &L) {
+static const EHPersonality &getObjCPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
switch (L.ObjCRuntime.getKind()) {
case ObjCRuntime::FragileMacOSX:
- return getCPersonality(L);
+ return getCPersonality(T, L);
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
return EHPersonality::NeXT_ObjC;
@@ -186,16 +194,19 @@ static const EHPersonality &getObjCPersonality(const LangOptions &L) {
llvm_unreachable("bad runtime kind");
}
-static const EHPersonality &getCXXPersonality(const LangOptions &L) {
+static const EHPersonality &getCXXPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
if (L.SjLjExceptions)
return EHPersonality::GNU_CPlusPlus_SJLJ;
- else
- return EHPersonality::GNU_CPlusPlus;
+ else if (useLibGCCSEHPersonality(T))
+ return EHPersonality::GNU_CPlusPlus_SEH;
+ return EHPersonality::GNU_CPlusPlus;
}
/// Determines the personality function to use when both C++
/// and Objective-C exceptions are being caught.
-static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
+static const EHPersonality &getObjCXXPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
switch (L.ObjCRuntime.getKind()) {
// The ObjC personality defers to the C++ personality for non-ObjC
// handlers. Unlike the C++ case, we use the same personality
@@ -207,7 +218,7 @@ static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
// In the fragile ABI, just use C++ exception handling and hope
// they're not doing crazy exception mixing.
case ObjCRuntime::FragileMacOSX:
- return getCXXPersonality(L);
+ return getCXXPersonality(T, L);
// The GCC runtime's personality function inherently doesn't support
// mixed EH. Use the C++ personality just to avoid returning null.
@@ -220,15 +231,17 @@ static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
llvm_unreachable("bad runtime kind");
}
-const EHPersonality &EHPersonality::get(const LangOptions &L) {
+const EHPersonality &EHPersonality::get(CodeGenModule &CGM) {
+ const llvm::Triple &T = CGM.getTarget().getTriple();
+ const LangOptions &L = CGM.getLangOpts();
if (L.CPlusPlus && L.ObjC1)
- return getObjCXXPersonality(L);
+ return getObjCXXPersonality(T, L);
else if (L.CPlusPlus)
- return getCXXPersonality(L);
+ return getCXXPersonality(T, L);
else if (L.ObjC1)
- return getObjCPersonality(L);
+ return getObjCPersonality(T, L);
else
- return getCPersonality(L);
+ return getCPersonality(T, L);
}
static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
@@ -305,8 +318,9 @@ void CodeGenModule::SimplifyPersonality() {
if (!LangOpts.ObjCRuntime.isNeXTFamily())
return;
- const EHPersonality &ObjCXX = EHPersonality::get(LangOpts);
- const EHPersonality &CXX = getCXXPersonality(LangOpts);
+ const EHPersonality &ObjCXX = EHPersonality::get(*this);
+ const EHPersonality &CXX =
+ getCXXPersonality(getTarget().getTriple(), LangOpts);
if (&ObjCXX == &CXX)
return;
@@ -403,14 +417,8 @@ llvm::Value *CodeGenFunction::getSelectorFromSlot() {
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E,
bool KeepInsertionPoint) {
- if (CGM.getTarget().getTriple().isWindowsMSVCEnvironment()) {
- ErrorUnsupported(E, "throw expression");
- return;
- }
-
if (!E->getSubExpr()) {
- EmitNoreturnRuntimeCallOrInvoke(getReThrowFn(CGM),
- ArrayRef<llvm::Value*>());
+ CGM.getCXXABI().emitRethrow(*this, /*isNoReturn*/true);
// throw is an expression, and the expression emitters expect us
// to leave ourselves at a valid insertion point.
@@ -420,6 +428,11 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E,
return;
}
+ if (CGM.getTarget().getTriple().isKnownWindowsMSVCEnvironment()) {
+ ErrorUnsupported(E, "throw expression");
+ return;
+ }
+
QualType ThrowType = E->getSubExpr()->getType();
if (ThrowType->isObjCObjectPointerType()) {
@@ -457,7 +470,7 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E,
CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
if (!Record->hasTrivialDestructor()) {
CXXDestructorDecl *DtorD = Record->getDestructor();
- Dtor = CGM.GetAddrOfCXXDestructor(DtorD, Dtor_Complete);
+ Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
Dtor = llvm::ConstantExpr::getBitCast(Dtor, Int8PtrTy);
}
}
@@ -576,7 +589,7 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
}
void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
- if (CGM.getTarget().getTriple().isWindowsMSVCEnvironment()) {
+ if (CGM.getTarget().getTriple().isKnownWindowsMSVCEnvironment()) {
ErrorUnsupported(&S, "try statement");
return;
}
@@ -601,8 +614,9 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// existing compilers do, and it's not clear that the standard
// personality routine is capable of doing this right. See C++ DR 388:
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
- QualType CaughtType = C->getCaughtType();
- CaughtType = CaughtType.getNonReferenceType().getUnqualifiedType();
+ Qualifiers CaughtTypeQuals;
+ QualType CaughtType = CGM.getContext().getUnqualifiedArrayType(
+ C->getCaughtType().getNonReferenceType(), CaughtTypeQuals);
llvm::Constant *TypeInfo = nullptr;
if (CaughtType->isObjCObjectPointerType())
@@ -720,18 +734,16 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Save the current IR generation state.
CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
- SaveAndRestoreLocation AutoRestoreLocation(*this, Builder);
- if (CGDebugInfo *DI = getDebugInfo())
- DI->EmitLocation(Builder, CurEHLocation);
+ ApplyDebugLocation AutoRestoreLocation(*this, CurEHLocation);
- const EHPersonality &personality = EHPersonality::get(getLangOpts());
+ const EHPersonality &personality = EHPersonality::get(CGM);
// Create and configure the landing pad.
llvm::BasicBlock *lpad = createBasicBlock("lpad");
EmitBlock(lpad);
llvm::LandingPadInst *LPadInst =
- Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
+ Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, nullptr),
getOpaquePersonalityFn(CGM, personality), 0);
llvm::Value *LPadExn = Builder.CreateExtractValue(LPadInst, 0);
@@ -795,7 +807,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
}
// Check whether we already have a handler for this type.
- if (catchTypes.insert(handler.Type))
+ if (catchTypes.insert(handler.Type).second)
// If not, add it directly to the landingpad.
LPadInst->addClause(handler.Type);
}
@@ -1259,7 +1271,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// constructor function-try-block's catch handler (p14), so this
// really only applies to destructors.
if (doImplicitRethrow && HaveInsertPoint()) {
- EmitRuntimeCallOrInvoke(getReThrowFn(CGM));
+ CGM.getCXXABI().emitRethrow(*this, /*isNoReturn*/false);
Builder.CreateUnreachable();
Builder.ClearInsertionPoint();
}
@@ -1541,9 +1553,9 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
Builder.SetInsertPoint(TerminateLandingPad);
// Tell the backend that this is a landing pad.
- const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
+ const EHPersonality &Personality = EHPersonality::get(CGM);
llvm::LandingPadInst *LPadInst =
- Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
+ Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, nullptr),
getOpaquePersonalityFn(CGM, Personality), 0);
LPadInst->addClause(getCatchAllValue(*this));
@@ -1600,7 +1612,7 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
EHResumeBlock = createBasicBlock("eh.resume");
Builder.SetInsertPoint(EHResumeBlock);
- const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
+ const EHPersonality &Personality = EHPersonality::get(CGM);
// This can always be a call because we necessarily didn't find
// anything on the EH stack which needs our help.
@@ -1619,7 +1631,7 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
llvm::Value *Sel = getSelectorFromSlot();
llvm::Type *LPadType = llvm::StructType::get(Exn->getType(),
- Sel->getType(), NULL);
+ Sel->getType(), nullptr);
llvm::Value *LPadVal = llvm::UndefValue::get(LPadType);
LPadVal = Builder.CreateInsertValue(LPadVal, Exn, 0, "lpad.val");
LPadVal = Builder.CreateInsertValue(LPadVal, Sel, 1, "lpad.val");
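A minimal C++ sketch (not part of the patch) of two constructs touched in CGException.cpp above: an expressionless throw, which now goes through CGCXXABI::emitRethrow rather than a hard-coded __cxa_rethrow call, and a catch of a qualified type, whose qualifiers (including those inside array types) are stripped before the landing-pad clause is built.

void forward() {
  try {
    throw 42;
  } catch (const volatile int &) {   // qualifiers dropped when the catch clause type_info is emitted
    throw;                           // rethrow of the in-flight exception via the C++ ABI hook
  }
}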
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 512b323ba109..ce7679c836e4 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -16,14 +16,16 @@
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
+#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/DeclObjC.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
@@ -209,7 +211,6 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
case SD_Automatic:
case SD_FullExpression:
- assert(!ObjCARCReferenceLifetimeType->isArrayType());
CodeGenFunction::Destroyer *Destroy;
CleanupKind CleanupKind;
if (Lifetime == Qualifiers::OCL_Strong) {
@@ -267,8 +268,8 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
} else {
- CleanupFn =
- CGF.CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
+ CleanupFn = CGF.CGM.getAddrOfCXXStructor(ReferenceTemporaryDtor,
+ StructorType::Complete);
CleanupArg = cast<llvm::Constant>(ReferenceTemporary);
}
CGF.CGM.getCXXABI().registerGlobalDtor(
@@ -312,15 +313,16 @@ createReferenceTemporary(CodeGenFunction &CGF,
llvm_unreachable("unknown storage duration");
}
-LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
- const MaterializeTemporaryExpr *M) {
+LValue CodeGenFunction::
+EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
const Expr *E = M->GetTemporaryExpr();
+ // FIXME: ideally this would use EmitAnyExprToMem; however, we cannot do so
+ // as that will cause the lifetime adjustment to be lost for ARC
if (getLangOpts().ObjCAutoRefCount &&
M->getType()->isObjCLifetimeType() &&
M->getType().getObjCLifetime() != Qualifiers::OCL_None &&
M->getType().getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
- // FIXME: Fold this into the general case below.
llvm::Value *Object = createReferenceTemporary(*this, M, E);
LValue RefTempDst = MakeAddrLValue(Object, M->getType());
@@ -331,7 +333,21 @@ LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
Var->setInitializer(CGM.EmitNullConstant(E->getType()));
}
- EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
+ switch (getEvaluationKind(E->getType())) {
+ default: llvm_unreachable("expected scalar or aggregate expression");
+ case TEK_Scalar:
+ EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
+ break;
+ case TEK_Aggregate: {
+ CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
+ EmitAggExpr(E, AggValueSlot::forAddr(Object, Alignment,
+ E->getType().getQualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ break;
+ }
+ }
pushTemporaryCleanup(*this, M, E, Object);
return RefTempDst;
@@ -341,8 +357,8 @@ LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
SmallVector<SubobjectAdjustment, 2> Adjustments;
E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
- for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I)
- EmitIgnoredExpr(CommaLHSs[I]);
+ for (const auto &Ignored : CommaLHSs)
+ EmitIgnoredExpr(Ignored);
if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
if (opaque->getType()->isRecordType()) {
@@ -376,7 +392,7 @@ LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
Adjustment.DerivedToBase.BasePath->path_begin(),
Adjustment.DerivedToBase.BasePath->path_end(),
- /*NullCheckValue=*/ false);
+ /*NullCheckValue=*/ false, E->getExprLoc());
break;
case SubobjectAdjustment::FieldAdjustment: {
@@ -442,13 +458,15 @@ static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
}
bool CodeGenFunction::sanitizePerformTypeCheck() const {
- return SanOpts->Null | SanOpts->Alignment | SanOpts->ObjectSize |
- SanOpts->Vptr;
+ return SanOpts.has(SanitizerKind::Null) |
+ SanOpts.has(SanitizerKind::Alignment) |
+ SanOpts.has(SanitizerKind::ObjectSize) |
+ SanOpts.has(SanitizerKind::Vptr);
}
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
- llvm::Value *Address,
- QualType Ty, CharUnits Alignment) {
+ llvm::Value *Address, QualType Ty,
+ CharUnits Alignment, bool SkipNullCheck) {
if (!sanitizePerformTypeCheck())
return;
@@ -460,26 +478,30 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
SanitizerScope SanScope(this);
- llvm::Value *Cond = nullptr;
+ SmallVector<std::pair<llvm::Value *, SanitizerKind>, 3> Checks;
llvm::BasicBlock *Done = nullptr;
- if (SanOpts->Null || TCK == TCK_DowncastPointer) {
+ bool AllowNullPointers = TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
+ TCK == TCK_UpcastToVirtualBase;
+ if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
+ !SkipNullCheck) {
// The glvalue must not be an empty glvalue.
- Cond = Builder.CreateICmpNE(
+ llvm::Value *IsNonNull = Builder.CreateICmpNE(
Address, llvm::Constant::getNullValue(Address->getType()));
- if (TCK == TCK_DowncastPointer) {
- // When performing a pointer downcast, it's OK if the value is null.
+ if (AllowNullPointers) {
+ // When performing pointer casts, it's OK if the value is null.
// Skip the remaining checks in that case.
Done = createBasicBlock("null");
llvm::BasicBlock *Rest = createBasicBlock("not.null");
- Builder.CreateCondBr(Cond, Rest, Done);
+ Builder.CreateCondBr(IsNonNull, Rest, Done);
EmitBlock(Rest);
- Cond = nullptr;
+ } else {
+ Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
}
}
- if (SanOpts->ObjectSize && !Ty->isIncompleteType()) {
+ if (SanOpts.has(SanitizerKind::ObjectSize) && !Ty->isIncompleteType()) {
uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();
// The glvalue must refer to a large enough storage region.
@@ -493,12 +515,12 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::Value *LargeEnough =
Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
llvm::ConstantInt::get(IntPtrTy, Size));
- Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;
+ Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
}
uint64_t AlignVal = 0;
- if (SanOpts->Alignment) {
+ if (SanOpts.has(SanitizerKind::Alignment)) {
AlignVal = Alignment.getQuantity();
if (!Ty->isIncompleteType() && !AlignVal)
AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
@@ -510,18 +532,18 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
llvm::Value *Aligned =
Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
- Cond = Cond ? Builder.CreateAnd(Cond, Aligned) : Aligned;
+ Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
}
}
- if (Cond) {
+ if (Checks.size() > 0) {
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(Loc),
EmitCheckTypeDescriptor(Ty),
llvm::ConstantInt::get(SizeTy, AlignVal),
llvm::ConstantInt::get(Int8Ty, TCK)
};
- EmitCheck(Cond, "type_mismatch", StaticData, Address, CRK_Recoverable);
+ EmitCheck(Checks, "type_mismatch", StaticData, Address);
}
// If possible, check that the vptr indicates that there is a subobject of
@@ -533,9 +555,10 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// -- the [pointer or glvalue] is used to access a non-static data member
// or call a non-static member function
CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
- if (SanOpts->Vptr &&
+ if (SanOpts.has(SanitizerKind::Vptr) &&
(TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
- TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference) &&
+ TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
+ TCK == TCK_UpcastToVirtualBase) &&
RD && RD->hasDefinition() && RD->isDynamicClass()) {
// Compute a hash of the mangled name of the type.
//
@@ -548,7 +571,8 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
Out);
// Blacklist based on the mangled type.
- if (!CGM.getSanitizerBlacklist().isBlacklistedType(Out.str())) {
+ if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType(
+ Out.str())) {
llvm::hash_code TypeHash = hash_value(Out.str());
// Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
@@ -577,6 +601,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// hard work of checking whether the vptr is for an object of the right
// type. This will either fill in the cache and return, or produce a
// diagnostic.
+ llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(Loc),
EmitCheckTypeDescriptor(Ty),
@@ -584,9 +609,8 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::ConstantInt::get(Int8Ty, TCK)
};
llvm::Value *DynamicData[] = { Address, Hash };
- EmitCheck(Builder.CreateICmpEQ(CacheVal, Hash),
- "dynamic_type_cache_miss", StaticData, DynamicData,
- CRK_AlwaysRecoverable);
+ EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
+ "dynamic_type_cache_miss", StaticData, DynamicData);
}
}
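A compact sketch (not part of the patch; names are illustrative) of source that exercises EmitTypeCheck: with -fsanitize=null,alignment,vptr the access below is preceded by the checks assembled above, and TCK_DowncastPointer is one of the kinds for which a null pointer is tolerated and the remaining checks are skipped.

struct Base { virtual ~Base(); };
struct Derived : Base { int payload; };

int read(Base *b) {
  // The downcast is checked (null allowed, vptr must match a 'Derived'),
  // and the member access is checked for null, alignment and a matching dynamic type.
  return static_cast<Derived *>(b)->payload;
}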
@@ -654,7 +678,7 @@ static llvm::Value *getArrayIndexingBound(
void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
llvm::Value *Index, QualType IndexType,
bool Accessed) {
- assert(SanOpts->ArrayBounds &&
+ assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
"should not be called unless adding bounds checks");
SanitizerScope SanScope(this);
@@ -674,7 +698,8 @@ void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
};
llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
: Builder.CreateICmpULE(IndexVal, BoundVal);
- EmitCheck(Check, "out_of_bounds", StaticData, Index, CRK_Recoverable);
+ EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds), "out_of_bounds",
+ StaticData, Index);
}
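And the corresponding trigger for EmitBoundsCheck (illustrative, not part of the patch): with -fsanitize=array-bounds, indexing into a constant-size array emits the "out_of_bounds" check built above.

int table[4];

int lookup(int i) {
  return table[i];                   // guarded by the out_of_bounds check under -fsanitize=array-bounds
}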
@@ -711,7 +736,6 @@ EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
return isPre ? IncVal : InVal;
}
-
//===----------------------------------------------------------------------===//
// LValue Expression Emission
//===----------------------------------------------------------------------===//
@@ -757,7 +781,7 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
LValue LV;
- if (SanOpts->ArrayBounds && isa<ArraySubscriptExpr>(E))
+ if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
else
LV = EmitLValue(E);
@@ -1130,8 +1154,11 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
CGM.DecorateInstruction(Load, TBAAPath, false/*ConvertTypeToTag*/);
}
- if ((SanOpts->Bool && hasBooleanRepresentation(Ty)) ||
- (SanOpts->Enum && Ty->getAs<EnumType>())) {
+ bool NeedsBoolCheck =
+ SanOpts.has(SanitizerKind::Bool) && hasBooleanRepresentation(Ty);
+ bool NeedsEnumCheck =
+ SanOpts.has(SanitizerKind::Enum) && Ty->getAs<EnumType>();
+ if (NeedsBoolCheck || NeedsEnumCheck) {
SanitizerScope SanScope(this);
llvm::APInt Min, End;
if (getRangeForType(*this, Ty, Min, End, true)) {
@@ -1151,8 +1178,9 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
EmitCheckSourceLocation(Loc),
EmitCheckTypeDescriptor(Ty)
};
- EmitCheck(Check, "load_invalid_value", StaticArgs, EmitCheckValue(Load),
- CRK_Recoverable);
+ SanitizerKind Kind = NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
+ EmitCheck(std::make_pair(Check, Kind), "load_invalid_value", StaticArgs,
+ EmitCheckValue(Load));
}
} else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
@@ -1361,12 +1389,34 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
return RValue::get(Vec);
}
+/// @brief Generates lvalue for partial ext_vector access.
+llvm::Value *CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
+ llvm::Value *VectorAddress = LV.getExtVectorAddr();
+ const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
+ QualType EQT = ExprVT->getElementType();
+ llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
+ llvm::Type *VectorElementPtrToTy = VectorElementTy->getPointerTo();
+
+ llvm::Value *CastToPointerElement =
+ Builder.CreateBitCast(VectorAddress,
+ VectorElementPtrToTy, "conv.ptr.element");
+
+ const llvm::Constant *Elts = LV.getExtVectorElts();
+ unsigned ix = getAccessedFieldNo(0, Elts);
+
+ llvm::Value *VectorBasePtrPlusIx =
+ Builder.CreateInBoundsGEP(CastToPointerElement,
+ llvm::ConstantInt::get(SizeTy, ix), "add.ptr");
+
+ return VectorBasePtrPlusIx;
+}
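A short sketch (not part of the patch) of the Clang ext_vector_type extension this new helper serves: it produces an element pointer for a partial vector access so that a swizzle can itself be indexed, which the EmitArraySubscriptExpr change further down then consumes.

typedef float float4 __attribute__((ext_vector_type(4)));

void set_high(float4 &v) {
  v.hi[0] = 5.0f;                    // subscript into the partial ext_vector access 'v.hi'
}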
+
/// @brief Loads of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
"Bad type for register variable");
- llvm::MDNode *RegName = dyn_cast<llvm::MDNode>(LV.getGlobalReg());
- assert(RegName && "Register LValue is not metadata");
+ llvm::MDNode *RegName = cast<llvm::MDNode>(
+ cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
// We accept integer and pointer types only
llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
@@ -1376,7 +1426,8 @@ RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
llvm::Type *Types[] = { Ty };
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
- llvm::Value *Call = Builder.CreateCall(F, RegName);
+ llvm::Value *Call = Builder.CreateCall(
+ F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
if (OrigTy->isPointerTy())
Call = Builder.CreateIntToPtr(Call, OrigTy);
return RValue::get(Call);
@@ -1626,7 +1677,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
"Bad type for register variable");
- llvm::MDNode *RegName = dyn_cast<llvm::MDNode>(Dst.getGlobalReg());
+ llvm::MDNode *RegName = cast<llvm::MDNode>(
+ cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
assert(RegName && "Register LValue is not metadata");
// We accept integer and pointer types only
@@ -1640,7 +1692,8 @@ void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
llvm::Value *Value = Src.getScalarVal();
if (OrigTy->isPointerTy())
Value = Builder.CreatePtrToInt(Value, Ty);
- Builder.CreateCall2(F, RegName, Value);
+ Builder.CreateCall2(F, llvm::MetadataAsValue::get(Ty->getContext(), RegName),
+ Value);
}
// setObjCGCLValueClass - sets class of the lvalue for the purpose of
@@ -1751,12 +1804,21 @@ EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}
+static LValue EmitThreadPrivateVarDeclLValue(
+ CodeGenFunction &CGF, const VarDecl *VD, QualType T, llvm::Value *V,
+ llvm::Type *RealVarTy, CharUnits Alignment, SourceLocation Loc) {
+ V = CGF.CGM.getOpenMPRuntime().getOMPAddrOfThreadPrivate(CGF, VD, V, Loc);
+ V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
+ return CGF.MakeAddrLValue(V, T, Alignment);
+}
+
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
const Expr *E, const VarDecl *VD) {
QualType T = E->getType();
// If it's thread_local, emit a call to its wrapper function instead.
- if (VD->getTLSKind() == VarDecl::TLS_Dynamic)
+ if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
+ CGF.CGM.getCXXABI().usesThreadWrapperFunction())
return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
@@ -1764,6 +1826,11 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
LValue LV;
+ // Emit reference to the private copy of the variable if it is an OpenMP
+ // threadprivate variable.
+ if (CGF.getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>())
+ return EmitThreadPrivateVarDeclLValue(CGF, VD, T, V, RealVarTy, Alignment,
+ E->getExprLoc());
if (VD->getType()->isReferenceType()) {
llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
LI->setAlignment(Alignment.getQuantity());
@@ -1821,10 +1888,12 @@ static LValue EmitGlobalNamedRegister(const VarDecl *VD,
if (M->getNumOperands() == 0) {
llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
Asm->getLabel());
- llvm::Value *Ops[] = { Str };
+ llvm::Metadata *Ops[] = {Str};
M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
- return LValue::MakeGlobalReg(M->getOperand(0), VD->getType(), Alignment);
+ return LValue::MakeGlobalReg(
+ llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)),
+ VD->getType(), Alignment);
}
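For context (not part of the patch): these "global register" lvalues come from GCC-style global register variables; the register name travels as metadata and is accessed through the llvm.read_register / llvm.write_register intrinsics, now wrapped in MetadataAsValue. A sketch, assuming a target on which Clang accepts the chosen (non-allocatable) register and a pre-C++17 language mode for the 'register' storage class:

register unsigned long current_sp asm("sp");   // global named register variable (GNU extension)

unsigned long read_stack_pointer() {
  return current_sp;                 // lowered to a call to llvm.read_register with the
                                     // register name carried as metadata
}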
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
@@ -1850,6 +1919,22 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// FIXME: Eventually we will want to emit vector element references.
return MakeAddrLValue(Val, T, Alignment);
}
+
+ // Check for captured variables.
+ if (E->refersToEnclosingVariableOrCapture()) {
+ if (auto *FD = LambdaCaptureFields.lookup(VD))
+ return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
+ else if (CapturedStmtInfo) {
+ if (auto *V = LocalDeclMap.lookup(VD))
+ return MakeAddrLValue(V, T, Alignment);
+ else
+ return EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
+ CapturedStmtInfo->getContextValue());
+ }
+ assert(isa<BlockDecl>(CurCodeDecl));
+ return MakeAddrLValue(GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>()),
+ T, Alignment);
+ }
}
// FIXME: We should be able to assert this for FunctionDecls as well!
@@ -1874,22 +1959,14 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
llvm::Value *V = LocalDeclMap.lookup(VD);
if (!V && VD->isStaticLocal())
- V = CGM.getStaticLocalDeclAddress(VD);
-
- // Use special handling for lambdas.
- if (!V) {
- if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
- return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
- } else if (CapturedStmtInfo) {
- if (const FieldDecl *FD = CapturedStmtInfo->lookup(VD))
- return EmitCapturedFieldLValue(*this, FD,
- CapturedStmtInfo->getContextValue());
- }
+ V = CGM.getOrCreateStaticVarDecl(
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
- assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
- return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
- T, Alignment);
- }
+ // Check if variable is threadprivate.
+ if (V && getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>())
+ return EmitThreadPrivateVarDeclLValue(
+ *this, VD, T, V, getTypes().ConvertTypeForMem(VD->getType()),
+ Alignment, E->getExprLoc());
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
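The OpenMP-related hunks here (and the EmitCXXGlobalVarDeclInit change earlier in this diff) key off the OMPThreadPrivateDeclAttr that Sema attaches to variables named in a threadprivate directive. A minimal example of the source construct (not part of the patch):

int counter = 0;
#pragma omp threadprivate(counter)   // each thread gets its own copy

void bump() {
  ++counter;                         // under -fopenmp, the address comes from
                                     // getOMPAddrOfThreadPrivate at the point of use
}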
@@ -2001,86 +2078,21 @@ LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
E->getType());
}
-static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
- SmallString<32>& Target) {
- Target.resize(CharByteWidth * (Source.size() + 1));
- char *ResultPtr = &Target[0];
- const UTF8 *ErrorPtr;
- bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
- (void)success;
- assert(success);
- Target.resize(ResultPtr - &Target[0]);
-}
-
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
- switch (E->getIdentType()) {
- default:
- return EmitUnsupportedLValue(E, "predefined expression");
-
- case PredefinedExpr::Func:
- case PredefinedExpr::Function:
- case PredefinedExpr::LFunction:
- case PredefinedExpr::FuncDName:
- case PredefinedExpr::FuncSig:
- case PredefinedExpr::PrettyFunction: {
- PredefinedExpr::IdentType IdentType = E->getIdentType();
- std::string GVName;
-
- // FIXME: We should use the string literal mangling for the Microsoft C++
- // ABI so that strings get merged.
- switch (IdentType) {
- default: llvm_unreachable("Invalid type");
- case PredefinedExpr::Func: GVName = "__func__."; break;
- case PredefinedExpr::Function: GVName = "__FUNCTION__."; break;
- case PredefinedExpr::FuncDName: GVName = "__FUNCDNAME__."; break;
- case PredefinedExpr::FuncSig: GVName = "__FUNCSIG__."; break;
- case PredefinedExpr::LFunction: GVName = "L__FUNCTION__."; break;
- case PredefinedExpr::PrettyFunction: GVName = "__PRETTY_FUNCTION__."; break;
- }
-
- StringRef FnName = CurFn->getName();
- if (FnName.startswith("\01"))
- FnName = FnName.substr(1);
- GVName += FnName;
-
- // If this is outside of a function use the top level decl.
- const Decl *CurDecl = CurCodeDecl;
- if (!CurDecl || isa<VarDecl>(CurDecl))
- CurDecl = getContext().getTranslationUnitDecl();
-
- const Type *ElemType = E->getType()->getArrayElementTypeNoTypeQual();
- std::string FunctionName;
- if (isa<BlockDecl>(CurDecl)) {
- // Blocks use the mangled function name.
- // FIXME: ComputeName should handle blocks.
- FunctionName = FnName.str();
- } else if (isa<CapturedDecl>(CurDecl)) {
- // For a captured statement, the function name is its enclosing
- // function name not the one compiler generated.
- FunctionName = PredefinedExpr::ComputeName(IdentType, CurDecl);
- } else {
- FunctionName = PredefinedExpr::ComputeName(IdentType, CurDecl);
- assert(cast<ConstantArrayType>(E->getType())->getSize() - 1 ==
- FunctionName.size() &&
- "Computed __func__ length differs from type!");
- }
-
- llvm::Constant *C;
- if (ElemType->isWideCharType()) {
- SmallString<32> RawChars;
- ConvertUTF8ToWideString(
- getContext().getTypeSizeInChars(ElemType).getQuantity(), FunctionName,
- RawChars);
- StringLiteral *SL = StringLiteral::Create(
- getContext(), RawChars, StringLiteral::Wide,
- /*Pascal = */ false, E->getType(), E->getLocation());
- C = CGM.GetAddrOfConstantStringFromLiteral(SL);
- } else {
- C = CGM.GetAddrOfConstantCString(FunctionName, GVName.c_str(), 1);
- }
+ auto SL = E->getFunctionName();
+ assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
+ StringRef FnName = CurFn->getName();
+ if (FnName.startswith("\01"))
+ FnName = FnName.substr(1);
+ StringRef NameItems[] = {
+ PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName};
+ std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
+ if (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)) {
+ auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str(), 1);
return MakeAddrLValue(C, E->getType());
}
- }
+ auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
+ return MakeAddrLValue(C, E->getType());
}
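The rewritten EmitPredefinedLValue relies on Sema now attaching a StringLiteral to the PredefinedExpr (hence the assert above); at the source level these are the usual predefined identifiers, for example:

#include <cstdio>

void report() {
  // __func__ is standard C++; __FUNCTION__ and __PRETTY_FUNCTION__ are the
  // GNU/MS-style spellings that map to the other PredefinedExpr ident types.
  std::printf("%s\n", __func__);
  std::printf("%s\n", __PRETTY_FUNCTION__);
}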
/// Emit a type description suitable for use by a runtime sanitizer library. The
@@ -2115,7 +2127,7 @@ llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
(intptr_t)T.getAsOpaquePtr(),
StringRef(), StringRef(), None, Buffer,
- ArrayRef<intptr_t>());
+ None);
llvm::Constant *Components[] = {
Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
@@ -2127,7 +2139,7 @@ llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
CGM.getModule(), Descriptor->getType(),
/*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
GV->setUnnamedAddr(true);
- CGM.disableSanitizerForGlobal(GV);
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
// Remember the descriptor for this type.
CGM.setTypeDescriptorInMap(T, GV);
@@ -2177,7 +2189,7 @@ llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
if (PLoc.isValid()) {
auto FilenameGV = CGM.GetAddrOfConstantCString(PLoc.getFilename(), ".src");
- CGM.disableSanitizerForGlobal(FilenameGV);
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(FilenameGV);
Filename = FilenameGV;
Line = PLoc.getLine();
Column = PLoc.getColumn();
@@ -2192,39 +2204,126 @@ llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
return llvm::ConstantStruct::getAnon(Data);
}
-void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName,
- ArrayRef<llvm::Constant *> StaticArgs,
- ArrayRef<llvm::Value *> DynamicArgs,
- CheckRecoverableKind RecoverKind) {
- assert(SanOpts != &SanitizerOptions::Disabled);
- assert(IsSanitizerScope);
+namespace {
+/// \brief Specify under what conditions this check can be recovered
+enum class CheckRecoverableKind {
+ /// Always terminate program execution if this check fails.
+ Unrecoverable,
+ /// Check supports recovering, runtime has both fatal (noreturn) and
+ /// non-fatal handlers for this check.
+ Recoverable,
+ /// Runtime conditionally aborts, so recovery always needs to be supported.
+ AlwaysRecoverable
+};
+}
- if (CGM.getCodeGenOpts().SanitizeUndefinedTrapOnError) {
- assert (RecoverKind != CRK_AlwaysRecoverable &&
- "Runtime call required for AlwaysRecoverable kind!");
- return EmitTrapCheck(Checked);
+static CheckRecoverableKind getRecoverableKind(SanitizerKind Kind) {
+ switch (Kind) {
+ case SanitizerKind::Vptr:
+ return CheckRecoverableKind::AlwaysRecoverable;
+ case SanitizerKind::Return:
+ case SanitizerKind::Unreachable:
+ return CheckRecoverableKind::Unrecoverable;
+ default:
+ return CheckRecoverableKind::Recoverable;
}
+}
- llvm::BasicBlock *Cont = createBasicBlock("cont");
+static void emitCheckHandlerCall(CodeGenFunction &CGF,
+ llvm::FunctionType *FnType,
+ ArrayRef<llvm::Value *> FnArgs,
+ StringRef CheckName,
+ CheckRecoverableKind RecoverKind, bool IsFatal,
+ llvm::BasicBlock *ContBB) {
+ assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
+ bool NeedsAbortSuffix =
+ IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
+ std::string FnName = ("__ubsan_handle_" + CheckName +
+ (NeedsAbortSuffix ? "_abort" : "")).str();
+ bool MayReturn =
+ !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
- llvm::BasicBlock *Handler = createBasicBlock("handler." + CheckName);
+ llvm::AttrBuilder B;
+ if (!MayReturn) {
+ B.addAttribute(llvm::Attribute::NoReturn)
+ .addAttribute(llvm::Attribute::NoUnwind);
+ }
+ B.addAttribute(llvm::Attribute::UWTable);
- llvm::Instruction *Branch = Builder.CreateCondBr(Checked, Cont, Handler);
+ llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(
+ FnType, FnName,
+ llvm::AttributeSet::get(CGF.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex, B));
+ llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
+ if (!MayReturn) {
+ HandlerCall->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateBr(ContBB);
+ }
+}
+void CodeGenFunction::EmitCheck(
+ ArrayRef<std::pair<llvm::Value *, SanitizerKind>> Checked,
+ StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
+ ArrayRef<llvm::Value *> DynamicArgs) {
+ assert(IsSanitizerScope);
+ assert(Checked.size() > 0);
+
+ llvm::Value *FatalCond = nullptr;
+ llvm::Value *RecoverableCond = nullptr;
+ for (int i = 0, n = Checked.size(); i < n; ++i) {
+ llvm::Value *Check = Checked[i].first;
+ llvm::Value *&Cond =
+ CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
+ ? RecoverableCond
+ : FatalCond;
+ Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
+ }
+
+ llvm::Value *JointCond;
+ if (FatalCond && RecoverableCond)
+ JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
+ else
+ JointCond = FatalCond ? FatalCond : RecoverableCond;
+ assert(JointCond);
+
+ CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
+ assert(SanOpts.has(Checked[0].second));
+#ifndef NDEBUG
+ for (int i = 1, n = Checked.size(); i < n; ++i) {
+ assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
+ "All recoverable kinds in a single check must be same!");
+ assert(SanOpts.has(Checked[i].second));
+ }
+#endif
+
+ if (CGM.getCodeGenOpts().SanitizeUndefinedTrapOnError) {
+ assert(RecoverKind != CheckRecoverableKind::AlwaysRecoverable &&
+ "Runtime call required for AlwaysRecoverable kind!");
+ // Assume that -fsanitize-undefined-trap-on-error overrides
+ // -fsanitize-recover= options, as we can only print meaningful error
+ // message and recover if we have a runtime support.
+ return EmitTrapCheck(JointCond);
+ }
+
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+ llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
+ llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
// Give hint that we very much don't expect to execute the handler
// Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
llvm::MDBuilder MDHelper(getLLVMContext());
llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
+ EmitBlock(Handlers);
- EmitBlock(Handler);
-
+ // Emit handler arguments and create handler function type.
llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
auto *InfoPtr =
new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
llvm::GlobalVariable::PrivateLinkage, Info);
InfoPtr->setUnnamedAddr(true);
- CGM.disableSanitizerForGlobal(InfoPtr);
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
SmallVector<llvm::Value *, 4> Args;
SmallVector<llvm::Type *, 4> ArgTypes;
@@ -2241,34 +2340,27 @@ void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName,
ArgTypes.push_back(IntPtrTy);
}
- bool Recover = RecoverKind == CRK_AlwaysRecoverable ||
- (RecoverKind == CRK_Recoverable &&
- CGM.getCodeGenOpts().SanitizeRecover);
-
llvm::FunctionType *FnType =
llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
- llvm::AttrBuilder B;
- if (!Recover) {
- B.addAttribute(llvm::Attribute::NoReturn)
- .addAttribute(llvm::Attribute::NoUnwind);
- }
- B.addAttribute(llvm::Attribute::UWTable);
- // Checks that have two variants use a suffix to differentiate them
- bool NeedsAbortSuffix = RecoverKind != CRK_Unrecoverable &&
- !CGM.getCodeGenOpts().SanitizeRecover;
- std::string FunctionName = ("__ubsan_handle_" + CheckName +
- (NeedsAbortSuffix? "_abort" : "")).str();
- llvm::Value *Fn = CGM.CreateRuntimeFunction(
- FnType, FunctionName,
- llvm::AttributeSet::get(getLLVMContext(),
- llvm::AttributeSet::FunctionIndex, B));
- llvm::CallInst *HandlerCall = EmitNounwindRuntimeCall(Fn, Args);
- if (Recover) {
- Builder.CreateBr(Cont);
+ if (!FatalCond || !RecoverableCond) {
+ // Simple case: we need to generate a single handler call, either
+ // fatal, or non-fatal.
+ emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind,
+ (FatalCond != nullptr), Cont);
} else {
- HandlerCall->setDoesNotReturn();
- Builder.CreateUnreachable();
+ // Emit two handler calls: first one for set of unrecoverable checks,
+ // another one for recoverable.
+ llvm::BasicBlock *NonFatalHandlerBB =
+ createBasicBlock("non_fatal." + CheckName);
+ llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
+ Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
+ EmitBlock(FatalHandlerBB);
+ emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, true,
+ NonFatalHandlerBB);
+ EmitBlock(NonFatalHandlerBB);
+ emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, false,
+ Cont);
}
EmitBlock(Cont);
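To make the fatal/recoverable split above concrete: whether a failed check feeds FatalCond or RecoverableCond is decided by the per-check SanitizeRecover set (the -fsanitize-recover= / -fno-sanitize-recover= options), and only the fatal flavour gets the _abort suffix in emitCheckHandlerCall. An illustrative source and flag combination (not part of the patch):

// clang++ -fsanitize=signed-integer-overflow -fno-sanitize-recover=signed-integer-overflow
//   -> the overflow check below calls __ubsan_handle_add_overflow_abort and does not return.
// clang++ -fsanitize=signed-integer-overflow -fsanitize-recover=signed-integer-overflow
//   -> it calls __ubsan_handle_add_overflow and branches back to the continuation block.
int add(int a, int b) {
  return a + b;                      // instrumented signed-overflow check feeding EmitCheck
}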
@@ -2318,12 +2410,13 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
QualType IdxTy = E->getIdx()->getType();
bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
- if (SanOpts->ArrayBounds)
+ if (SanOpts.has(SanitizerKind::ArrayBounds))
EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
// If the base is a vector type, then we are forming a vector element lvalue
// with this subscript.
- if (E->getBase()->getType()->isVectorType()) {
+ if (E->getBase()->getType()->isVectorType() &&
+ !isa<ExtVectorElementExpr>(E->getBase())) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
@@ -2339,8 +2432,17 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// size is a VLA or Objective-C interface.
llvm::Value *Address = nullptr;
CharUnits ArrayAlignment;
- if (const VariableArrayType *vla =
- getContext().getAsVariableArrayType(E->getType())) {
+ if (isa<ExtVectorElementExpr>(E->getBase())) {
+ LValue LV = EmitLValue(E->getBase());
+ Address = EmitExtVectorElementLValue(LV);
+ Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
+ const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
+ QualType EQT = ExprVT->getElementType();
+ return MakeAddrLValue(Address, EQT,
+ getContext().getTypeAlignInChars(EQT));
+ }
+ else if (const VariableArrayType *vla =
+ getContext().getAsVariableArrayType(E->getType())) {
// The base must be a pointer, which is not an aggregate. Emit
// it. It needs to be emitted first in case it's what captures
// the VLA bounds.
@@ -2879,10 +2981,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
llvm::Value *This = LV.getAddress();
// Perform the derived-to-base conversion
- llvm::Value *Base =
- GetAddressOfBaseClass(This, DerivedClassDecl,
- E->path_begin(), E->path_end(),
- /*NullCheckValue=*/false);
+ llvm::Value *Base = GetAddressOfBaseClass(
+ This, DerivedClassDecl, E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false, E->getExprLoc());
return MakeAddrLValue(Base, E->getType());
}
@@ -2958,18 +3059,15 @@ RValue CodeGenFunction::EmitRValueForField(LValue LV,
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
- if (CGDebugInfo *DI = getDebugInfo()) {
- SourceLocation Loc = E->getLocStart();
- // Force column info to be generated so we can differentiate
- // multiple call sites on the same line in the debug info.
- // FIXME: This is insufficient. Two calls coming from the same macro
- // expansion will still get the same line/column and break debug info. It's
- // possible that LLVM can be fixed to not rely on this uniqueness, at which
- // point this workaround can be removed.
- const FunctionDecl* Callee = E->getDirectCallee();
- bool ForceColumnInfo = Callee && Callee->isInlineSpecified();
- DI->EmitLocation(Builder, Loc, ForceColumnInfo);
- }
+ // Force column info to be generated so we can differentiate
+ // multiple call sites on the same line in the debug info.
+ // FIXME: This is insufficient. Two calls coming from the same macro
+ // expansion will still get the same line/column and break debug info. It's
+ // possible that LLVM can be fixed to not rely on this uniqueness, at which
+ // point this workaround can be removed.
+ ApplyDebugLocation DL(*this, E->getLocStart(),
+ E->getDirectCallee() &&
+ E->getDirectCallee()->isInlineSpecified());
// Builtins never have block type.
if (E->getCallee()->getType()->isBlockPointerType())
@@ -2984,7 +3082,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
const Decl *TargetDecl = E->getCalleeDecl();
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
if (unsigned builtinID = FD->getBuiltinID())
- return EmitBuiltinExpr(FD, builtinID, E);
+ return EmitBuiltinExpr(FD, builtinID, E, ReturnValue);
}
if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
@@ -3046,8 +3144,8 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
}
llvm::Value *Callee = EmitScalarExpr(E->getCallee());
- return EmitCall(E->getCallee()->getType(), Callee, E->getLocStart(),
- ReturnValue, E->arg_begin(), E->arg_end(), TargetDecl);
+ return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
+ TargetDecl);
}
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
@@ -3218,11 +3316,8 @@ LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
}
RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
- SourceLocation CallLoc,
- ReturnValueSlot ReturnValue,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
- const Decl *TargetDecl) {
+ const CallExpr *E, ReturnValueSlot ReturnValue,
+ const Decl *TargetDecl, llvm::Value *Chain) {
// Get the actual function type. The callee type will always be a pointer to
// function type or a block pointer type.
assert(CalleeType->isFunctionPointerType() &&
@@ -3243,7 +3338,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
if (const FunctionDecl* FD = dyn_cast_or_null<const FunctionDecl>(TargetDecl))
ForceColumnInfo = FD->isInlineSpecified();
- if (getLangOpts().CPlusPlus && SanOpts->Function &&
+ if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
(!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
if (llvm::Constant *PrefixSig =
CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
@@ -3275,14 +3370,11 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
llvm::Value *CalleeRTTIMatch =
Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
llvm::Constant *StaticData[] = {
- EmitCheckSourceLocation(CallLoc),
+ EmitCheckSourceLocation(E->getLocStart()),
EmitCheckTypeDescriptor(CalleeType)
};
- EmitCheck(CalleeRTTIMatch,
- "function_type_mismatch",
- StaticData,
- Callee,
- CRK_Recoverable);
+ EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
+ "function_type_mismatch", StaticData, Callee);
Builder.CreateBr(Cont);
EmitBlock(Cont);
@@ -3290,11 +3382,15 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
}
CallArgList Args;
- EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd,
+ if (Chain)
+ Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
+ CGM.getContext().VoidPtrTy);
+ EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arg_begin(),
+ E->arg_end(), E->getDirectCallee(), /*ParamsToSkip*/ 0,
ForceColumnInfo);
- const CGFunctionInfo &FnInfo =
- CGM.getTypes().arrangeFreeFunctionCall(Args, FnType);
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
+ Args, FnType, /*isChainCall=*/Chain);
// C99 6.5.2.2p6:
// If the expression that denotes the called function has a type
@@ -3313,7 +3409,10 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
// through an unprototyped function type works like a *non-variadic*
// call. The way we make this work is to cast to the exact type
// of the promoted arguments.
- if (isa<FunctionNoProtoType>(FnType)) {
+ //
+ // Chain calls use this same code path to add the invisible chain parameter
+ // to the function type.
+ if (isa<FunctionNoProtoType>(FnType) || Chain) {
llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
CalleeTy = CalleeTy->getPointerTo();
Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
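
The new Chain parameter threads one extra hidden argument through the call; a hedged source-level sketch of what exercises it, assuming this corresponds to Clang's __builtin_call_with_static_chain builtin:

    // Hypothetical user code: the chain pointer becomes the invisible leading
    // argument described above, and the callee is bitcast to the adjusted
    // function type just like a K&R-style call.
    int target(int x);
    int caller(int x, void *frame) {
      return __builtin_call_with_static_chain(target(x), frame);
    }
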
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 7aacee4d6ba1..6d63b3ae9cf8 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -24,29 +24,28 @@
using namespace clang;
using namespace CodeGen;
-RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
- SourceLocation CallLoc,
- llvm::Value *Callee,
- ReturnValueSlot ReturnValue,
- llvm::Value *This,
- llvm::Value *ImplicitParam,
- QualType ImplicitParamTy,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
+static RequiredArgs commonEmitCXXMemberOrOperatorCall(
+ CodeGenFunction &CGF, const CXXMethodDecl *MD, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam,
+ QualType ImplicitParamTy, const CallExpr *CE, CallArgList &Args) {
+ assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
+ isa<CXXOperatorCallExpr>(CE));
assert(MD->isInstance() &&
- "Trying to emit a member call expr on a static method!");
+ "Trying to emit a member or operator call expr on a static method!");
// C++11 [class.mfct.non-static]p2:
// If a non-static member function of a class X is called for an object that
// is not of type X, or of a type derived from X, the behavior is undefined.
- EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
- : TCK_MemberCall,
- CallLoc, This, getContext().getRecordType(MD->getParent()));
-
- CallArgList Args;
+ SourceLocation CallLoc;
+ if (CE)
+ CallLoc = CE->getExprLoc();
+ CGF.EmitTypeCheck(
+ isa<CXXConstructorDecl>(MD) ? CodeGenFunction::TCK_ConstructorCall
+ : CodeGenFunction::TCK_MemberCall,
+ CallLoc, This, CGF.getContext().getRecordType(MD->getParent()));
// Push the this ptr.
- Args.add(RValue::get(This), MD->getThisType(getContext()));
+ Args.add(RValue::get(This), MD->getThisType(CGF.getContext()));
// If there is an implicit parameter (e.g. VTT), emit it.
if (ImplicitParam) {
@@ -55,14 +54,45 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
-
+
// And the rest of the call args.
- EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+ if (CE) {
+ // Special case: skip first argument of CXXOperatorCall (it is "this").
+ unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
+ CGF.EmitCallArgs(Args, FPT, CE->arg_begin() + ArgsToSkip, CE->arg_end(),
+ CE->getDirectCallee());
+ } else {
+ assert(
+ FPT->getNumParams() == 0 &&
+ "No CallExpr specified for function with non-zero number of arguments");
+ }
+ return required;
+}
+RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
+ const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
+ llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
+ const CallExpr *CE) {
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ CallArgList Args;
+ RequiredArgs required = commonEmitCXXMemberOrOperatorCall(
+ *this, MD, Callee, ReturnValue, This, ImplicitParam, ImplicitParamTy, CE,
+ Args);
return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
Callee, ReturnValue, Args, MD);
}
+RValue CodeGenFunction::EmitCXXStructorCall(
+ const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
+ llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
+ const CallExpr *CE, StructorType Type) {
+ CallArgList Args;
+ commonEmitCXXMemberOrOperatorCall(*this, MD, Callee, ReturnValue, This,
+ ImplicitParam, ImplicitParamTy, CE, Args);
+ return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(MD, Type),
+ Callee, ReturnValue, Args, MD);
+}
+
static CXXRecordDecl *getCXXRecord(const Expr *E) {
QualType T = E->getType();
if (const PointerType *PTy = T->getAs<PointerType>())
@@ -86,14 +116,27 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
if (MD->isStatic()) {
// The method is static, emit it as we would a regular call.
llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
- return EmitCall(getContext().getPointerType(MD->getType()), Callee,
- CE->getLocStart(), ReturnValue, CE->arg_begin(),
- CE->arg_end());
+ return EmitCall(getContext().getPointerType(MD->getType()), Callee, CE,
+ ReturnValue);
}
- // Compute the object pointer.
+ bool HasQualifier = ME->hasQualifier();
+ NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
+ bool IsArrow = ME->isArrow();
const Expr *Base = ME->getBase();
- bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();
+
+ return EmitCXXMemberOrOperatorMemberCallExpr(
+ CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
+}
+
+RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
+ const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
+ bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
+ const Expr *Base) {
+ assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
+
+ // Compute the object pointer.
+ bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;
const CXXMethodDecl *DevirtualizedMethod = nullptr;
if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
@@ -102,7 +145,15 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
assert(DevirtualizedMethod);
const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
const Expr *Inner = Base->ignoreParenBaseCasts();
- if (getCXXRecord(Inner) == DevirtualizedClass)
+ if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
+ MD->getReturnType().getCanonicalType())
+ // If the return types are not the same, this might be a case where more
+ // code needs to run to compensate for it. For example, the derived
+ // method might return a type that inherits from the return
+ // type of MD and has a prefix.
+ // For now we just avoid devirtualizing these covariant cases.
+ DevirtualizedMethod = nullptr;
+ else if (getCXXRecord(Inner) == DevirtualizedClass)
// If the class of the Inner expression is where the dynamic method
// is defined, build the this pointer from it.
Base = Inner;
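
For illustration only, the shape of the covariant override that the reordered check above declines to devirtualize:

    struct Base    { virtual Base    *clone(); };
    struct Derived : Base { Derived *clone() override; };  // covariant return type
    // Converting the Derived* result back to Base* may need a pointer
    // adjustment, so a direct call to Derived::clone() is not substituted.
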
@@ -113,19 +164,10 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
// we don't have support for that yet, so do a virtual call.
DevirtualizedMethod = nullptr;
}
- // If the return types are not the same, this might be a case where more
- // code needs to run to compensate for it. For example, the derived
- // method might return a type that inherits form from the return
- // type of MD and has a prefix.
- // For now we just avoid devirtualizing these covariant cases.
- if (DevirtualizedMethod &&
- DevirtualizedMethod->getReturnType().getCanonicalType() !=
- MD->getReturnType().getCanonicalType())
- DevirtualizedMethod = nullptr;
}
llvm::Value *This;
- if (ME->isArrow())
+ if (IsArrow)
This = EmitScalarExpr(Base);
else
This = EmitLValue(Base).getAddress();
@@ -137,34 +179,40 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
return RValue::get(nullptr);
- if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
- // We don't like to generate the trivial copy/move assignment operator
- // when it isn't necessary; just produce the proper effect here.
- llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitAggregateAssign(This, RHS, CE->getType());
- return RValue::get(This);
- }
-
- if (isa<CXXConstructorDecl>(MD) &&
- cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
- // Trivial move and copy ctor are the same.
- llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
- CE->arg_begin(), CE->arg_end());
- return RValue::get(This);
+ if (!MD->getParent()->mayInsertExtraPadding()) {
+ if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
+ // We don't like to generate the trivial copy/move assignment operator
+ // when it isn't necessary; just produce the proper effect here.
+ // Special case: skip first argument of CXXOperatorCall (it is "this").
+ unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
+ llvm::Value *RHS =
+ EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
+ EmitAggregateAssign(This, RHS, CE->getType());
+ return RValue::get(This);
+ }
+
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
+ // Trivial move and copy ctor are the same.
+ assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitAggregateCopy(This, RHS, CE->arg_begin()->getType());
+ return RValue::get(This);
+ }
+ llvm_unreachable("unknown trivial member function");
}
- llvm_unreachable("unknown trivial member function");
}
// Compute the function type we're calling.
- const CXXMethodDecl *CalleeDecl = DevirtualizedMethod ? DevirtualizedMethod : MD;
+ const CXXMethodDecl *CalleeDecl =
+ DevirtualizedMethod ? DevirtualizedMethod : MD;
const CGFunctionInfo *FInfo = nullptr;
- if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
- FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor,
- Dtor_Complete);
- else if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
- FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor,
- Ctor_Complete);
+ if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
+ FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
+ Dtor, StructorType::Complete);
+ else if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
+ FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
+ Ctor, StructorType::Complete);
else
FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
@@ -184,22 +232,21 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
"Destructor shouldn't have explicit parameters");
assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
if (UseVirtualCall) {
- CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
- CE->getExprLoc(), This);
+ CGM.getCXXABI().EmitVirtualDestructorCall(
+ *this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
} else {
- if (getLangOpts().AppleKext &&
- MD->isVirtual() &&
- ME->hasQualifier())
- Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
+ Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
- Callee = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete, FInfo, Ty);
+ Callee =
+ CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty);
else {
const CXXDestructorDecl *DDtor =
cast<CXXDestructorDecl>(DevirtualizedMethod);
Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
}
- EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
- /*ImplicitParam=*/nullptr, QualType(), nullptr,nullptr);
+ EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
+ /*ImplicitParam=*/nullptr, QualType(), CE);
}
return RValue::get(nullptr);
}
@@ -209,10 +256,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
} else if (UseVirtualCall) {
Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty);
} else {
- if (getLangOpts().AppleKext &&
- MD->isVirtual() &&
- ME->hasQualifier())
- Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
+ Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
Callee = CGM.GetAddrOfFunction(MD, Ty);
else {
@@ -225,9 +270,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
*this, MD, This, UseVirtualCall);
}
- return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
- /*ImplicitParam=*/nullptr, QualType(),
- CE->arg_begin(), CE->arg_end());
+ return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
+ /*ImplicitParam=*/nullptr, QualType(), CE);
}
RValue
@@ -275,7 +319,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
// And the rest of the call args
- EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
+ EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getDirectCallee());
return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
Callee, ReturnValue, Args);
}
@@ -286,21 +330,9 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
ReturnValueSlot ReturnValue) {
assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
- LValue LV = EmitLValue(E->getArg(0));
- llvm::Value *This = LV.getAddress();
-
- if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
- MD->isTrivial()) {
- llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
- QualType Ty = E->getType();
- EmitAggregateAssign(This, Src, Ty);
- return RValue::get(This);
- }
-
- llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
- return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
- /*ImplicitParam=*/nullptr, QualType(),
- E->arg_begin() + 1, E->arg_end());
+ return EmitCXXMemberOrOperatorMemberCallExpr(
+ E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
+ /*IsArrow=*/false, E->getArg(0));
}
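
A hedged C++ sketch of the call shape now routed through EmitCXXMemberOrOperatorMemberCallExpr: for an overloaded member operator, argument 0 of the CXXOperatorCallExpr is the object itself and plays the role of 'this', which is why the first argument is skipped above.

    struct S { S &operator=(const S &);  /* user-provided, non-trivial */ };

    void assign(S &a, const S &b) {
      a = b;  // emitted like a member call: arg 0 is 'a', real arguments start at 1
    }
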
RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
@@ -392,8 +424,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
if (const ConstantArrayType *arrayType
= getContext().getAsConstantArrayType(E->getType())) {
- EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
- E->arg_begin(), E->arg_end());
+ EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(), E);
} else {
CXXCtorType Type = Ctor_Complete;
bool ForVirtualBase = false;
@@ -420,7 +451,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
// Call the constructor.
EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
- E->arg_begin(), E->arg_end());
+ E);
}
}
@@ -445,7 +476,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
assert(!getContext().getAsConstantArrayType(E->getType())
&& "EmitSynthesizedCXXCopyCtor - Copied-in Array");
- EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E->arg_begin(), E->arg_end());
+ EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
@@ -726,9 +757,8 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
switch (CGF.getEvaluationKind(AllocType)) {
case TEK_Scalar:
- CGF.EmitScalarInit(Init, nullptr, CGF.MakeAddrLValue(NewPtr, AllocType,
- Alignment),
- false);
+ CGF.EmitScalarInit(Init, nullptr,
+ CGF.MakeAddrLValue(NewPtr, AllocType, Alignment), false);
return;
case TEK_Complex:
CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
@@ -895,8 +925,7 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
NumElements = Builder.CreateSub(
NumElements,
llvm::ConstantInt::get(NumElements->getType(), InitListElements));
- EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr,
- CCE->arg_begin(), CCE->arg_end(),
+ EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
CCE->requiresZeroInitialization());
return;
}
@@ -987,6 +1016,7 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
llvm::Value *NewPtr,
llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie) {
+ ApplyDebugLocation DL(CGF, E->getStartLoc());
if (E->isArray())
CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements,
AllocSizeWithoutCookie);
@@ -1003,9 +1033,9 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
llvm::Instruction *CallOrInvoke;
llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
RValue RV =
- CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(Args, CalleeType),
- CalleeAddr, ReturnValueSlot(), Args,
- Callee, &CallOrInvoke);
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
+ Args, CalleeType, /*chainCall=*/false),
+ CalleeAddr, ReturnValueSlot(), Args, Callee, &CallOrInvoke);
/// C++1y [expr.new]p10:
/// [In a new-expression,] an implementation is allowed to omit a call
@@ -1226,15 +1256,14 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Value *allocSize =
EmitCXXNewAllocSize(*this, E, minElements, numElements,
allocSizeWithoutCookie);
-
+
allocatorArgs.add(RValue::get(allocSize), sizeType);
// We start at 1 here because the first argument (the allocation size)
// has already been emitted.
- EmitCallArgs(allocatorArgs, allocatorType->isVariadic(),
- allocatorType->param_type_begin() + 1,
- allocatorType->param_type_end(), E->placement_arg_begin(),
- E->placement_arg_end());
+ EmitCallArgs(allocatorArgs, allocatorType, E->placement_arg_begin(),
+ E->placement_arg_end(), /* CalleeDecl */ nullptr,
+ /*ParamsToSkip*/ 1);
// Emit the allocation call. If the allocator is a global placement
// operator, just "inline" it directly.
@@ -1386,12 +1415,19 @@ namespace {
};
}
+void
+CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
+ llvm::Value *CompletePtr,
+ QualType ElementType) {
+ EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
+ OperatorDelete, ElementType);
+}
+
/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
- const FunctionDecl *OperatorDelete,
+ const CXXDeleteExpr *DE,
llvm::Value *Ptr,
- QualType ElementType,
- bool UseGlobalDelete) {
+ QualType ElementType) {
// Find the destructor for the type, if applicable. If the
// destructor is virtual, we'll just emit the vcall and return.
const CXXDestructorDecl *Dtor = nullptr;
@@ -1401,29 +1437,8 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
Dtor = RD->getDestructor();
if (Dtor->isVirtual()) {
- if (UseGlobalDelete) {
- // If we're supposed to call the global delete, make sure we do so
- // even if the destructor throws.
-
- // Derive the complete-object pointer, which is what we need
- // to pass to the deallocation function.
- llvm::Value *completePtr =
- CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);
-
- CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
- completePtr, OperatorDelete,
- ElementType);
- }
-
- // FIXME: Provide a source location here.
- CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
- CGF.CGM.getCXXABI().EmitVirtualDestructorCall(CGF, Dtor, DtorType,
- SourceLocation(), Ptr);
-
- if (UseGlobalDelete) {
- CGF.PopCleanupBlock();
- }
-
+ CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
+ Dtor);
return;
}
}
@@ -1432,6 +1447,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
// Make sure that we call delete even if the dtor throws.
// This doesn't have to be a conditional cleanup because we're going
// to pop it off in a second.
+ const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
Ptr, OperatorDelete, ElementType);
@@ -1608,8 +1624,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
if (E->isArrayForm()) {
EmitArrayDelete(*this, E, Ptr, DeleteTy);
} else {
- EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
- E->isGlobalDelete());
+ EmitObjectDelete(*this, E, Ptr, DeleteTy);
}
EmitBlock(DeleteEnd);
@@ -1800,19 +1815,23 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
RunCleanupsScope Scope(*this);
- LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
- Slot.getAlignment());
+ LValue SlotLV =
+ MakeAddrLValue(Slot.getAddr(), E->getType(), Slot.getAlignment());
CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
e = E->capture_init_end();
i != e; ++i, ++CurField) {
// Emit initialization
-
LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
- ArrayRef<VarDecl *> ArrayIndexes;
- if (CurField->getType()->isArrayType())
- ArrayIndexes = E->getCaptureInitIndexVars(i);
- EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ if (CurField->hasCapturedVLAType()) {
+ auto VAT = CurField->getCapturedVLAType();
+ EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
+ } else {
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (CurField->getType()->isArrayType())
+ ArrayIndexes = E->getCaptureInitIndexVars(i);
+ EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ }
}
}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 7244b9e4d1eb..1580bbe6a294 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -15,9 +15,13 @@
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Metadata.h"
#include <algorithm>
using namespace clang;
using namespace CodeGen;
@@ -142,7 +146,7 @@ public:
// FIXME: CompoundLiteralExpr
- ComplexPairTy EmitCast(CastExpr::CastKind CK, Expr *Op, QualType DestTy);
+ ComplexPairTy EmitCast(CastKind CK, Expr *Op, QualType DestTy);
ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
// Unlike for scalars, we don't have to worry about function->ptr demotion
// here.
@@ -230,6 +234,9 @@ public:
ComplexPairTy EmitBinMul(const BinOpInfo &Op);
ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+ ComplexPairTy EmitComplexBinOpLibCall(StringRef LibCallName,
+ const BinOpInfo &Op);
+
ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
return EmitBinAdd(EmitBinOps(E));
}
@@ -326,8 +333,7 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
/// EmitStoreOfComplex - Store the specified real/imag parts into the
/// specified value pointer.
-void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val,
- LValue lvalue,
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
bool isInit) {
if (lvalue.getType()->isAtomicType())
return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);
@@ -410,7 +416,7 @@ ComplexPairTy ComplexExprEmitter::EmitScalarToComplexCast(llvm::Value *Val,
return ComplexPairTy(Val, llvm::Constant::getNullValue(Val->getType()));
}
-ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
+ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
QualType DestTy) {
switch (CK) {
case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
@@ -528,9 +534,15 @@ ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
if (Op.LHS.first->getType()->isFloatingPointTy()) {
ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r");
- ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ if (Op.LHS.second && Op.RHS.second)
+ ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ else
+ ResI = Op.LHS.second ? Op.LHS.second : Op.RHS.second;
+ assert(ResI && "Only one operand may be real!");
} else {
ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
}
return ComplexPairTy(ResR, ResI);
@@ -539,63 +551,222 @@ ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
llvm::Value *ResR, *ResI;
if (Op.LHS.first->getType()->isFloatingPointTy()) {
- ResR = Builder.CreateFSub(Op.LHS.first, Op.RHS.first, "sub.r");
- ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ ResR = Builder.CreateFSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ if (Op.LHS.second && Op.RHS.second)
+ ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ else
+ ResI = Op.LHS.second ? Op.LHS.second
+ : Builder.CreateFNeg(Op.RHS.second, "sub.i");
+ assert(ResI && "Only one operand may be real!");
} else {
- ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
}
return ComplexPairTy(ResR, ResI);
}
+/// \brief Emit a libcall for a binary operation on complex types.
+ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
+ const BinOpInfo &Op) {
+ CallArgList Args;
+ Args.add(RValue::get(Op.LHS.first),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+ Args.add(RValue::get(Op.LHS.second),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+ Args.add(RValue::get(Op.RHS.first),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+ Args.add(RValue::get(Op.RHS.second),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+
+ // We *must* use the full CG function call building logic here because the
+ // complex type has special ABI handling. We also should not forget about
+ // special calling convention which may be used for compiler builtins.
+ const CGFunctionInfo &FuncInfo =
+ CGF.CGM.getTypes().arrangeFreeFunctionCall(
+ Op.Ty, Args, FunctionType::ExtInfo(/* No CC here - will be added later */),
+ RequiredArgs::All);
+ llvm::FunctionType *FTy = CGF.CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGF.CGM.CreateBuiltinFunction(FTy, LibCallName);
+ llvm::Instruction *Call;
+
+ RValue Res = CGF.EmitCall(FuncInfo, Func, ReturnValueSlot(), Args,
+ nullptr, &Call);
+ cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getBuiltinCC());
+ cast<llvm::CallInst>(Call)->setDoesNotThrow();
+
+ return Res.getComplexVal();
+}
+
+/// \brief Lookup the libcall name for a given floating point type complex
+/// multiply.
+static StringRef getComplexMultiplyLibCallName(llvm::Type *Ty) {
+ switch (Ty->getTypeID()) {
+ default:
+ llvm_unreachable("Unsupported floating point type!");
+ case llvm::Type::HalfTyID:
+ return "__mulhc3";
+ case llvm::Type::FloatTyID:
+ return "__mulsc3";
+ case llvm::Type::DoubleTyID:
+ return "__muldc3";
+ case llvm::Type::PPC_FP128TyID:
+ return "__multc3";
+ case llvm::Type::X86_FP80TyID:
+ return "__mulxc3";
+ case llvm::Type::FP128TyID:
+ return "__multc3";
+ }
+}
+// See C11 Annex G.5.1 for the semantics of multiplicative operators on complex
+// typed values.
ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
using llvm::Value;
Value *ResR, *ResI;
+ llvm::MDBuilder MDHelper(CGF.getLLVMContext());
if (Op.LHS.first->getType()->isFloatingPointTy()) {
- Value *ResRl = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
- Value *ResRr = Builder.CreateFMul(Op.LHS.second, Op.RHS.second,"mul.rr");
- ResR = Builder.CreateFSub(ResRl, ResRr, "mul.r");
+ // The general formulation is:
+ // (a + ib) * (c + id) = (a * c - b * d) + i(a * d + b * c)
+ //
+ // But we can fold away components which would be zero due to a real
+ // operand according to C11 Annex G.5.1p2.
+ // FIXME: C11 also provides for imaginary types which would allow folding
+ // still more of this within the type system.
+
+ if (Op.LHS.second && Op.RHS.second) {
+ // If both operands are complex, emit the core math directly, and then
+ // test for NaNs. If we find NaNs in the result, we delegate to a libcall
+ // to carefully re-compute the correct infinity representation if
+ // possible. The expectation is that the presence of NaNs here is
+ // *extremely* rare, and so the cost of the libcall is almost irrelevant.
+ // This is good, because the libcall re-computes the core multiplication
+ // exactly the same as we do here and re-tests for NaNs in order to be
+ // a generic complex*complex libcall.
+
+ // First compute the four products.
+ Value *AC = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul_ac");
+ Value *BD = Builder.CreateFMul(Op.LHS.second, Op.RHS.second, "mul_bd");
+ Value *AD = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul_ad");
+ Value *BC = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul_bc");
+
+ // The real part is the difference of the first two products; the
+ // imaginary part is the sum of the last two.
+ ResR = Builder.CreateFSub(AC, BD, "mul_r");
+ ResI = Builder.CreateFAdd(AD, BC, "mul_i");
+
+ // Emit the test for the real part becoming NaN and create a branch to
+ // handle it. We test for NaN by comparing the number to itself.
+ Value *IsRNaN = Builder.CreateFCmpUNO(ResR, ResR, "isnan_cmp");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock("complex_mul_cont");
+ llvm::BasicBlock *INaNBB = CGF.createBasicBlock("complex_mul_imag_nan");
+ llvm::Instruction *Branch = Builder.CreateCondBr(IsRNaN, INaNBB, ContBB);
+ llvm::BasicBlock *OrigBB = Branch->getParent();
+
+ // Give hint that we very much don't expect to see NaNs.
+ // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
+ llvm::MDNode *BrWeight = MDHelper.createBranchWeights(1, (1U << 20) - 1);
+ Branch->setMetadata(llvm::LLVMContext::MD_prof, BrWeight);
+
+ // Now test the imaginary part and create its branch.
+ CGF.EmitBlock(INaNBB);
+ Value *IsINaN = Builder.CreateFCmpUNO(ResI, ResI, "isnan_cmp");
+ llvm::BasicBlock *LibCallBB = CGF.createBasicBlock("complex_mul_libcall");
+ Branch = Builder.CreateCondBr(IsINaN, LibCallBB, ContBB);
+ Branch->setMetadata(llvm::LLVMContext::MD_prof, BrWeight);
+
+ // Now emit the libcall on this slowest of the slow paths.
+ CGF.EmitBlock(LibCallBB);
+ Value *LibCallR, *LibCallI;
+ std::tie(LibCallR, LibCallI) = EmitComplexBinOpLibCall(
+ getComplexMultiplyLibCallName(Op.LHS.first->getType()), Op);
+ Builder.CreateBr(ContBB);
+
+ // Finally continue execution by phi-ing together the different
+ // computation paths.
+ CGF.EmitBlock(ContBB);
+ llvm::PHINode *RealPHI = Builder.CreatePHI(ResR->getType(), 3, "real_mul_phi");
+ RealPHI->addIncoming(ResR, OrigBB);
+ RealPHI->addIncoming(ResR, INaNBB);
+ RealPHI->addIncoming(LibCallR, LibCallBB);
+ llvm::PHINode *ImagPHI = Builder.CreatePHI(ResI->getType(), 3, "imag_mul_phi");
+ ImagPHI->addIncoming(ResI, OrigBB);
+ ImagPHI->addIncoming(ResI, INaNBB);
+ ImagPHI->addIncoming(LibCallI, LibCallBB);
+ return ComplexPairTy(RealPHI, ImagPHI);
+ }
+ assert((Op.LHS.second || Op.RHS.second) &&
+ "At least one operand must be complex!");
+
+ // If either of the operands is a real rather than a complex, the
+ // imaginary component is ignored when computing the real component of the
+ // result.
+ ResR = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
- Value *ResIl = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il");
- Value *ResIr = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
- ResI = Builder.CreateFAdd(ResIl, ResIr, "mul.i");
+ ResI = Op.LHS.second
+ ? Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il")
+ : Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
} else {
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
- Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr");
- ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
+ Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second, "mul.rr");
+ ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
- ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
+ ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
}
return ComplexPairTy(ResR, ResI);
}
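
A hedged source-level view of the Annex G lowering above for a full complex*complex multiply (float shown; __mulsc3 is the name returned by getComplexMultiplyLibCallName for that type):

    _Complex float mul(_Complex float x, _Complex float y) {
      // Fast path: ac - bd and ad + bc computed inline; only if both results
      // compare unordered with themselves (NaN) does control reach the
      // __mulsc3 slow path, which recomputes a proper infinity if possible.
      return x * y;
    }
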
+// See C11 Annex G.5.1 for the semantics of multiplicative operators on complex
+// typed values.
ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
llvm::Value *DSTr, *DSTi;
- if (Op.LHS.first->getType()->isFloatingPointTy()) {
- // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
- llvm::Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr); // a*c
- llvm::Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi); // b*d
- llvm::Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2); // ac+bd
-
- llvm::Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr); // c*c
- llvm::Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi); // d*d
- llvm::Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5); // cc+dd
-
- llvm::Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr); // b*c
- llvm::Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi); // a*d
- llvm::Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8); // bc-ad
+ if (LHSr->getType()->isFloatingPointTy()) {
+ // If we have a complex operand on the RHS, we delegate to a libcall to
+ // handle all of the complexities and minimize underflow/overflow cases.
+ //
+ // FIXME: We would be able to avoid the libcall in many places if we
+ // supported imaginary types in addition to complex types.
+ if (RHSi) {
+ BinOpInfo LibCallOp = Op;
+ // If LHS was a real, supply a null imaginary part.
+ if (!LHSi)
+ LibCallOp.LHS.second = llvm::Constant::getNullValue(LHSr->getType());
+
+ StringRef LibCallName;
+ switch (LHSr->getType()->getTypeID()) {
+ default:
+ llvm_unreachable("Unsupported floating point type!");
+ case llvm::Type::HalfTyID:
+ return EmitComplexBinOpLibCall("__divhc3", LibCallOp);
+ case llvm::Type::FloatTyID:
+ return EmitComplexBinOpLibCall("__divsc3", LibCallOp);
+ case llvm::Type::DoubleTyID:
+ return EmitComplexBinOpLibCall("__divdc3", LibCallOp);
+ case llvm::Type::PPC_FP128TyID:
+ return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
+ case llvm::Type::X86_FP80TyID:
+ return EmitComplexBinOpLibCall("__divxc3", LibCallOp);
+ case llvm::Type::FP128TyID:
+ return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
+ }
+ }
+ assert(LHSi && "Can have at most one non-complex operand!");
- DSTr = Builder.CreateFDiv(Tmp3, Tmp6);
- DSTi = Builder.CreateFDiv(Tmp9, Tmp6);
+ DSTr = Builder.CreateFDiv(LHSr, RHSr);
+ DSTi = Builder.CreateFDiv(LHSi, RHSr);
} else {
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
// (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
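
Hedged consequences of the division rewrite above, with the libcall name taken from the switch for double:

    _Complex double by_complex(_Complex double z, _Complex double w) {
      return z / w;  // delegated to __divdc3 to limit overflow/underflow trouble
    }
    _Complex double by_real(_Complex double z, double d) {
      return z / d;  // no libcall: one scalar fdiv per component
    }
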
@@ -626,8 +797,15 @@ ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
BinOpInfo Ops;
- Ops.LHS = Visit(E->getLHS());
- Ops.RHS = Visit(E->getRHS());
+ if (E->getLHS()->getType()->isRealFloatingType())
+ Ops.LHS = ComplexPairTy(CGF.EmitScalarExpr(E->getLHS()), nullptr);
+ else
+ Ops.LHS = Visit(E->getLHS());
+ if (E->getRHS()->getType()->isRealFloatingType())
+ Ops.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
+ else
+ Ops.RHS = Visit(E->getRHS());
+
Ops.Ty = E->getType();
return Ops;
}
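
A hedged sketch of the operand shapes the change above produces: a real floating-point operand stays a bare scalar (its imaginary half is a null Value) instead of being widened to a complex pair up front.

    _Complex float shift(_Complex float z, float f) {
      return z + f;  // real part: one fadd; imaginary part passes through unchanged
    }
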
@@ -647,12 +825,19 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
// __block variables need to have the rhs evaluated first, plus this should
// improve codegen a little.
OpInfo.Ty = E->getComputationResultType();
+ QualType ComplexElementTy = cast<ComplexType>(OpInfo.Ty)->getElementType();
// The RHS should have been converted to the computation type.
- assert(OpInfo.Ty->isAnyComplexType());
- assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty,
- E->getRHS()->getType()));
- OpInfo.RHS = Visit(E->getRHS());
+ if (E->getRHS()->getType()->isRealFloatingType()) {
+ assert(
+ CGF.getContext()
+ .hasSameUnqualifiedType(ComplexElementTy, E->getRHS()->getType()));
+ OpInfo.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
+ } else {
+ assert(CGF.getContext()
+ .hasSameUnqualifiedType(OpInfo.Ty, E->getRHS()->getType()));
+ OpInfo.RHS = Visit(E->getRHS());
+ }
LValue LHS = CGF.EmitLValue(E->getLHS());
@@ -662,7 +847,15 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
} else {
llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, E->getExprLoc());
- OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
+ // For floating point real operands we can directly pass the scalar form
+ // to the binary operator emission and potentially get more efficient code.
+ if (LHSTy->isRealFloatingType()) {
+ if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy))
+ LHSVal = CGF.EmitScalarConversion(LHSVal, LHSTy, ComplexElementTy);
+ OpInfo.LHS = ComplexPairTy(LHSVal, nullptr);
+ } else {
+ OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
+ }
}
// Expand the binary operator.
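
The compound-assignment case follows the same pattern; a hedged example:

    void scale(_Complex float *z, float s) {
      *z *= s;  // LHS loaded as a complex pair, RHS stays scalar: two fmuls,
                // with no (0 * s) terms to fold away
    }
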
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index b508dcb446fb..54f7eee6791e 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -104,16 +104,7 @@ AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
// Round up the field offset to the alignment of the field type.
CharUnits AlignedNextFieldOffsetInChars =
- NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
-
- if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
- assert(!Packed && "Alignment is wrong even with a packed struct!");
-
- // Convert the struct to a packed struct.
- ConvertStructToPacked();
-
- AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
- }
+ NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
// We need to append padding.
@@ -122,6 +113,24 @@ AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
assert(NextFieldOffsetInChars == FieldOffsetInChars &&
"Did not add enough padding!");
+ AlignedNextFieldOffsetInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
+ }
+
+ if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
+ assert(!Packed && "Alignment is wrong even with a packed struct!");
+
+ // Convert the struct to a packed struct.
+ ConvertStructToPacked();
+
+ // After we pack the struct, we may need to insert padding.
+ if (NextFieldOffsetInChars < FieldOffsetInChars) {
+ // We need to append padding.
+ AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
+
+ assert(NextFieldOffsetInChars == FieldOffsetInChars &&
+ "Did not add enough padding!");
+ }
AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
}
@@ -486,10 +495,14 @@ llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
// No tail padding is necessary.
} else {
// Append tail padding if necessary.
- AppendTailPadding(LayoutSizeInChars);
-
CharUnits LLVMSizeInChars =
- NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+
+ if (LLVMSizeInChars != LayoutSizeInChars)
+ AppendTailPadding(LayoutSizeInChars);
+
+ LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
// Check if we need to convert the struct to a packed struct.
if (NextFieldOffsetInChars <= LayoutSizeInChars &&
@@ -501,7 +514,10 @@ llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
"Converting to packed did not help!");
}
- assert(LayoutSizeInChars == NextFieldOffsetInChars &&
+ LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+
+ assert(LayoutSizeInChars == LLVMSizeInChars &&
"Tail padding mismatch!");
}
@@ -734,6 +750,20 @@ public:
// initialise any elements that have not been initialised explicitly
unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+ // Initialize remaining array elements.
+ // FIXME: This doesn't handle member pointers correctly!
+ llvm::Constant *fillC;
+ if (Expr *filler = ILE->getArrayFiller())
+ fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
+ else
+ fillC = llvm::Constant::getNullValue(ElemTy);
+ if (!fillC)
+ return nullptr;
+
+ // Try to use a ConstantAggregateZero if we can.
+ if (fillC->isNullValue() && !NumInitableElts)
+ return llvm::ConstantAggregateZero::get(AType);
+
// Copy initializer elements.
std::vector<llvm::Constant*> Elts;
Elts.reserve(NumInitableElts + NumElements);
@@ -748,15 +778,6 @@ public:
Elts.push_back(C);
}
- // Initialize remaining array elements.
- // FIXME: This doesn't handle member pointers correctly!
- llvm::Constant *fillC;
- if (Expr *filler = ILE->getArrayFiller())
- fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
- else
- fillC = llvm::Constant::getNullValue(ElemTy);
- if (!fillC)
- return nullptr;
RewriteType |= (fillC->getType() != ElemTy);
Elts.resize(NumElements, fillC);
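
A hedged C++ sketch of the initializer shape that the hoisted filler handling above now emits as a single zeroinitializer (no explicit elements, null filler):

    // NumInitableElts is 0 and the array filler folds to a null value, so the
    // whole aggregate becomes a ConstantAggregateZero instead of 4096 constants.
    int table[4096] = {};
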
@@ -869,7 +890,8 @@ public:
if (VD->isFileVarDecl() || VD->hasExternalStorage())
return CGM.GetAddrOfGlobalVar(VD);
else if (VD->isLocalVarDecl())
- return CGM.getStaticLocalDeclAddress(VD);
+ return CGM.getOrCreateStaticVarDecl(
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
}
}
return nullptr;
@@ -1126,13 +1148,14 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
// FIXME: the target may want to specify that this is packed.
llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
Complex[1]->getType(),
- NULL);
+ nullptr);
return llvm::ConstantStruct::get(STy, Complex);
}
case APValue::Float: {
const llvm::APFloat &Init = Value.getFloat();
if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf &&
- !Context.getLangOpts().NativeHalfType)
+ !Context.getLangOpts().NativeHalfType &&
+ !Context.getLangOpts().HalfArgsAndReturns)
return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
else
return llvm::ConstantFP::get(VMContext, Init);
@@ -1148,7 +1171,7 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
// FIXME: the target may want to specify that this is packed.
llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
Complex[1]->getType(),
- NULL);
+ nullptr);
return llvm::ConstantStruct::get(STy, Complex);
}
case APValue::Vector: {
@@ -1189,9 +1212,6 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
unsigned NumElements = Value.getArraySize();
unsigned NumInitElts = Value.getArrayInitializedElts();
- std::vector<llvm::Constant*> Elts;
- Elts.reserve(NumElements);
-
// Emit array filler, if there is one.
llvm::Constant *Filler = nullptr;
if (Value.hasArrayFiller())
@@ -1199,7 +1219,18 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
CAT->getElementType(), CGF);
// Emit initializer elements.
- llvm::Type *CommonElementType = nullptr;
+ llvm::Type *CommonElementType =
+ getTypes().ConvertType(CAT->getElementType());
+
+ // Try to use a ConstantAggregateZero if we can.
+ if (Filler && Filler->isNullValue() && !NumInitElts) {
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(CommonElementType, NumElements);
+ return llvm::ConstantAggregateZero::get(AType);
+ }
+
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumElements);
for (unsigned I = 0; I < NumElements; ++I) {
llvm::Constant *C = Filler;
if (I < NumInitElts)
@@ -1268,83 +1299,6 @@ CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
return getCXXABI().EmitMemberDataPointer(type, chars);
}
-static void
-FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
- SmallVectorImpl<llvm::Constant *> &Elements,
- uint64_t StartOffset) {
- assert(StartOffset % CGM.getContext().getCharWidth() == 0 &&
- "StartOffset not byte aligned!");
-
- if (CGM.getTypes().isZeroInitializable(T))
- return;
-
- if (const ConstantArrayType *CAT =
- CGM.getContext().getAsConstantArrayType(T)) {
- QualType ElementTy = CAT->getElementType();
- uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);
-
- for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
- FillInNullDataMemberPointers(CGM, ElementTy, Elements,
- StartOffset + I * ElementSize);
- }
- } else if (const RecordType *RT = T->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
-
- // Go through all bases and fill in any null pointer to data members.
- for (const auto &I : RD->bases()) {
- if (I.isVirtual()) {
- // Ignore virtual bases.
- continue;
- }
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
-
- // Ignore empty bases.
- if (BaseDecl->isEmpty())
- continue;
-
- // Ignore bases that don't have any pointer to data members.
- if (CGM.getTypes().isZeroInitializable(BaseDecl))
- continue;
-
- uint64_t BaseOffset =
- CGM.getContext().toBits(Layout.getBaseClassOffset(BaseDecl));
- FillInNullDataMemberPointers(CGM, I.getType(),
- Elements, StartOffset + BaseOffset);
- }
-
- // Visit all fields.
- unsigned FieldNo = 0;
- for (RecordDecl::field_iterator I = RD->field_begin(),
- E = RD->field_end(); I != E; ++I, ++FieldNo) {
- QualType FieldType = I->getType();
-
- if (CGM.getTypes().isZeroInitializable(FieldType))
- continue;
-
- uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
- FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
- }
- } else {
- assert(T->isMemberPointerType() && "Should only see member pointers here!");
- assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
- "Should only see pointers to data members here!");
-
- CharUnits StartIndex = CGM.getContext().toCharUnitsFromBits(StartOffset);
- CharUnits EndIndex = StartIndex + CGM.getContext().getTypeSizeInChars(T);
-
- // FIXME: hardcodes Itanium member pointer representation!
- llvm::Constant *NegativeOne =
- llvm::ConstantInt::get(CGM.Int8Ty, -1ULL, /*isSigned*/true);
-
- // Fill in the null data member pointer.
- for (CharUnits I = StartIndex; I != EndIndex; ++I)
- Elements[I.getQuantity()] = NegativeOne;
- }
-}
-
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
llvm::Type *baseType,
const CXXRecordDecl *base);
@@ -1433,32 +1387,8 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
if (baseLayout.isZeroInitializableAsBase())
return llvm::Constant::getNullValue(baseType);
- // If the base type is a struct, we can just use its null constant.
- if (isa<llvm::StructType>(baseType)) {
- return EmitNullConstant(CGM, base, /*complete*/ false);
- }
-
- // Otherwise, some bases are represented as arrays of i8 if the size
- // of the base is smaller than its corresponding LLVM type. Figure
- // out how many elements this base array has.
- llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
- unsigned numBaseElements = baseArrayType->getNumElements();
-
- // Fill in null data member pointers.
- SmallVector<llvm::Constant *, 16> baseElements(numBaseElements);
- FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
- baseElements, 0);
-
- // Now go through all other elements and zero them out.
- if (numBaseElements) {
- llvm::Constant *i8_zero = llvm::Constant::getNullValue(CGM.Int8Ty);
- for (unsigned i = 0; i != numBaseElements; ++i) {
- if (!baseElements[i])
- baseElements[i] = i8_zero;
- }
- }
-
- return llvm::ConstantArray::get(baseArrayType, baseElements);
+ // Otherwise, we can just use its null constant.
+ return EmitNullConstant(CGM, base, /*asCompleteObject=*/false);
}
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
@@ -1489,9 +1419,7 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
assert(T->isMemberPointerType() && "Should only see member pointers here!");
assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
"Should only see pointers to data members here!");
-
- // Itanium C++ ABI 2.3:
- // A NULL pointer is represented as -1.
+
return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
}
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 140e9aa3445f..a9cbf05da104 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -85,18 +85,54 @@ public:
return CGF.EmitCheckedLValue(E, TCK);
}
- void EmitBinOpCheck(Value *Check, const BinOpInfo &Info);
+ void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerKind>> Checks,
+ const BinOpInfo &Info);
Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
}
+ void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
+ const AlignValueAttr *AVAttr = nullptr;
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const ValueDecl *VD = DRE->getDecl();
+
+ if (VD->getType()->isReferenceType()) {
+ if (const auto *TTy =
+ dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
+ AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
+ } else {
+ // Assumptions for function parameters are emitted at the start of the
+ // function, so there is no need to repeat that here.
+ if (isa<ParmVarDecl>(VD))
+ return;
+
+ AVAttr = VD->getAttr<AlignValueAttr>();
+ }
+ }
+
+ if (!AVAttr)
+ if (const auto *TTy =
+ dyn_cast<TypedefType>(E->getType()))
+ AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
+
+ if (!AVAttr)
+ return;
+
+ Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
+ llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
+ CGF.EmitAlignmentAssumption(V, AlignmentCI->getZExtValue());
+ }
+
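
A hedged C sketch of code that reaches the new EmitLValueAlignmentAssumption hook, assuming Clang's align_value attribute; a function parameter would be skipped here because its assumption is already emitted in the prologue:

    double *buf __attribute__((align_value(64)));  // global, not a ParmVarDecl

    double first(void) {
      // Loading 'buf' emits an alignment assumption on the loaded pointer,
      // letting later passes assume 64-byte alignment.
      return buf[0];
    }
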
/// EmitLoadOfLValue - Given an expression with complex type that represents a
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
Value *EmitLoadOfLValue(const Expr *E) {
- return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
- E->getExprLoc());
+ Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
+ E->getExprLoc());
+
+ EmitLValueAlignmentAssumption(E, V);
+ return V;
}
/// EmitConversionToBool - Convert the specified expression value to a
@@ -160,6 +196,7 @@ public:
//===--------------------------------------------------------------------===//
Value *Visit(Expr *E) {
+ ApplyDebugLocation DL(CGF, E->getLocStart());
return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
}
@@ -274,6 +311,10 @@ public:
Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
if (E->getType()->isVariablyModifiedType())
CGF.EmitVariablyModifiedType(E->getType());
+
+ if (CGDebugInfo *DI = CGF.getDebugInfo())
+ DI->EmitExplicitCastType(E->getType());
+
return VisitCastExpr(E);
}
Value *VisitCastExpr(CastExpr *E);
@@ -282,7 +323,10 @@ public:
if (E->getCallReturnType()->isReferenceType())
return EmitLoadOfLValue(E);
- return CGF.EmitCallExpr(E).getScalarVal();
+ Value *V = CGF.EmitCallExpr(E).getScalarVal();
+
+ EmitLValueAlignmentAssumption(E, V);
+ return V;
}
Value *VisitStmtExpr(const StmtExpr *E);
@@ -410,7 +454,7 @@ public:
case LangOptions::SOB_Defined:
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
// Fall through.
case LangOptions::SOB_Trapping:
@@ -418,7 +462,8 @@ public:
}
}
- if (Ops.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow)
+ if (Ops.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
return EmitOverflowCheckedBinOp(Ops);
if (Ops.LHS->getType()->isFPOrFPVectorTy())
@@ -682,8 +727,8 @@ void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc,
CGF.EmitCheckTypeDescriptor(OrigSrcType),
CGF.EmitCheckTypeDescriptor(DstType)
};
- CGF.EmitCheck(Check, "float_cast_overflow", StaticArgs, OrigSrc,
- CodeGenFunction::CRK_Recoverable);
+ CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
+ "float_cast_overflow", StaticArgs, OrigSrc);
}
/// EmitScalarConversion - Emit a conversion from the specified type to the
@@ -701,7 +746,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
llvm::Type *SrcTy = Src->getType();
// If casting to/from storage-only half FP, use special intrinsics.
- if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
+ if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns) {
Src = Builder.CreateCall(
CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
CGF.CGM.FloatTy),
@@ -767,13 +813,14 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// An overflowing conversion has undefined behavior if either the source type
// or the destination type is a floating-point type.
- if (CGF.SanOpts->FloatCastOverflow &&
+ if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
(OrigSrcType->isFloatingType() || DstType->isFloatingType()))
EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType,
DstTy);
// Cast to half via float
- if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType)
+ if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns)
DstTy = CGF.FloatTy;
if (isa<llvm::IntegerType>(SrcTy)) {
@@ -839,8 +886,10 @@ Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
/// \brief Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
-/// operation). The check passes if \p Check, which is an \c i1, is \c true.
-void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
+/// operation). The check passes if all values in \p Checks (which are \c i1),
+/// are \c true.
+void ScalarExprEmitter::EmitBinOpCheck(
+ ArrayRef<std::pair<Value *, SanitizerKind>> Checks, const BinOpInfo &Info) {
assert(CGF.IsSanitizerScope);
StringRef CheckName;
SmallVector<llvm::Constant *, 4> StaticData;
@@ -870,7 +919,7 @@ void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
CheckName = "divrem_overflow";
StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
} else {
- // Signed arithmetic overflow (+, -, *).
+ // Arithmetic overflow (+, -, *).
switch (Opcode) {
case BO_Add: CheckName = "add_overflow"; break;
case BO_Sub: CheckName = "sub_overflow"; break;
@@ -883,8 +932,7 @@ void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
DynamicData.push_back(Info.RHS);
}
- CGF.EmitCheck(Check, CheckName, StaticData, DynamicData,
- CodeGenFunction::CRK_Recoverable);
+ CGF.EmitCheck(Checks, CheckName, StaticData, DynamicData);
}
//===----------------------------------------------------------------------===//
@@ -1076,7 +1124,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
Value *Idx = Visit(E->getIdx());
QualType IdxTy = E->getIdx()->getType();
- if (CGF.SanOpts->ArrayBounds)
+ if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
return Builder.CreateExtractElement(Base, Idx, "vecext");
@@ -1304,8 +1352,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
llvm::Type *DstTy = ConvertType(DestTy);
if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
- llvm::Type *MidTy = CGF.CGM.getDataLayout().getIntPtrType(SrcTy);
- return Builder.CreateIntToPtr(Builder.CreatePtrToInt(Src, MidTy), DstTy);
+ llvm_unreachable("wrong cast for pointers in different address spaces"
+ "(must be an address space cast)!");
}
return Builder.CreateBitCast(Src, DstTy);
}
@@ -1344,9 +1392,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
E->getType()->getPointeeCXXRecordDecl();
assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");
- return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
- CE->path_begin(), CE->path_end(),
- ShouldNullCheckClassCastValue(CE));
+ return CGF.GetAddressOfBaseClass(
+ Visit(E), DerivedClassDecl, CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
}
case CK_Dynamic: {
Value *V = Visit(const_cast<Expr*>(E));
@@ -1364,8 +1412,11 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// anything here.
if (!E->getType()->isVariableArrayType()) {
assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
- assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
- ->getElementType()) &&
+ V = CGF.Builder.CreatePointerCast(
+ V, ConvertType(E->getType())->getPointerTo(
+ V->getType()->getPointerAddressSpace()));
+
+ assert(isa<llvm::ArrayType>(V->getType()->getPointerElementType()) &&
"Expected pointer to array");
V = Builder.CreateStructGEP(V, 0, "arraydecay");
}
@@ -1528,7 +1579,7 @@ EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
case LangOptions::SOB_Defined:
return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
// Fall through.
case LangOptions::SOB_Trapping:
@@ -1576,9 +1627,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// checking, and fall into the slow path with the atomic cmpxchg loop.
if (!type->isBooleanType() && type->isIntegerType() &&
!(type->isUnsignedIntegerType() &&
- CGF.SanOpts->UnsignedIntegerOverflow) &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
CGF.getLangOpts().getSignedOverflowBehavior() !=
- LangOptions::SOB_Trapping) {
+ LangOptions::SOB_Trapping) {
llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
llvm::AtomicRMWInst::Sub;
llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
@@ -1627,7 +1678,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CanOverflow && type->isSignedIntegerOrEnumerationType()) {
value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
} else if (CanOverflow && type->isUnsignedIntegerType() &&
- CGF.SanOpts->UnsignedIntegerOverflow) {
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
BinOpInfo BinOp;
BinOp.LHS = value;
BinOp.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
@@ -1691,7 +1742,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Add the inc/dec to the real part.
llvm::Value *amt;
- if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
+ if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns) {
// Another special case: half FP increment should be done via float
value = Builder.CreateCall(
CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
@@ -1714,7 +1766,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
- if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType)
+ if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns)
value = Builder.CreateCall(
CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
CGF.CGM.FloatTy),
@@ -1740,11 +1793,11 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (atomicPHI) {
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
- llvm::Value *pair = Builder.CreateAtomicCmpXchg(
- LV.getAddress(), atomicPHI, CGF.EmitToMemory(value, type),
- llvm::SequentiallyConsistent, llvm::SequentiallyConsistent);
- llvm::Value *old = Builder.CreateExtractValue(pair, 0);
- llvm::Value *success = Builder.CreateExtractValue(pair, 1);
+ auto Pair = CGF.EmitAtomicCompareExchange(
+ LV, RValue::get(atomicPHI), RValue::get(CGF.EmitToMemory(value, type)),
+ E->getExprLoc());
+ llvm::Value *old = Pair.first.getScalarVal();
+ llvm::Value *success = Pair.second.getScalarVal();
atomicPHI->addIncoming(old, opBB);
Builder.CreateCondBr(success, contBB, opBB);
Builder.SetInsertPoint(contBB);
@@ -2019,10 +2072,10 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
QualType type = atomicTy->getValueType();
if (!type->isBooleanType() && type->isIntegerType() &&
- !(type->isUnsignedIntegerType() &&
- CGF.SanOpts->UnsignedIntegerOverflow) &&
- CGF.getLangOpts().getSignedOverflowBehavior() !=
- LangOptions::SOB_Trapping) {
+ !(type->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
+ CGF.getLangOpts().getSignedOverflowBehavior() !=
+ LangOptions::SOB_Trapping) {
llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
switch (OpInfo.Opcode) {
// We don't have atomicrmw operands for *, %, /, <<, >>
@@ -2084,11 +2137,11 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
if (atomicPHI) {
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
- llvm::Value *pair = Builder.CreateAtomicCmpXchg(
- LHSLV.getAddress(), atomicPHI, CGF.EmitToMemory(Result, LHSTy),
- llvm::SequentiallyConsistent, llvm::SequentiallyConsistent);
- llvm::Value *old = Builder.CreateExtractValue(pair, 0);
- llvm::Value *success = Builder.CreateExtractValue(pair, 1);
+ auto Pair = CGF.EmitAtomicCompareExchange(
+ LHSLV, RValue::get(atomicPHI),
+ RValue::get(CGF.EmitToMemory(Result, LHSTy)), E->getExprLoc());
+ llvm::Value *old = Pair.first.getScalarVal();
+ llvm::Value *success = Pair.second.getScalarVal();
atomicPHI->addIncoming(old, opBB);
Builder.CreateCondBr(success, contBB, opBB);
Builder.SetInsertPoint(contBB);
@@ -2131,12 +2184,14 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
- llvm::Value *Cond = nullptr;
+ SmallVector<std::pair<llvm::Value *, SanitizerKind>, 2> Checks;
- if (CGF.SanOpts->IntegerDivideByZero)
- Cond = Builder.CreateICmpNE(Ops.RHS, Zero);
+ if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
+ Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
+ SanitizerKind::IntegerDivideByZero));
+ }
- if (CGF.SanOpts->SignedIntegerOverflow &&
+ if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
Ops.Ty->hasSignedIntegerRepresentation()) {
llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
@@ -2146,26 +2201,29 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
- llvm::Value *Overflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
- Cond = Cond ? Builder.CreateAnd(Cond, Overflow, "and") : Overflow;
+ llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
+ Checks.push_back(
+ std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
}
- if (Cond)
- EmitBinOpCheck(Cond, Ops);
+ if (Checks.size() > 0)
+ EmitBinOpCheck(Checks, Ops);
}
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
{
CodeGenFunction::SanitizerScope SanScope(&CGF);
- if ((CGF.SanOpts->IntegerDivideByZero ||
- CGF.SanOpts->SignedIntegerOverflow) &&
+ if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
+ CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
Ops.Ty->isIntegerType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
- } else if (CGF.SanOpts->FloatDivideByZero &&
+ } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
Ops.Ty->isRealFloatingType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
- EmitBinOpCheck(Builder.CreateFCmpUNE(Ops.RHS, Zero), Ops);
+ llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
+ EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
+ Ops);
}
}
@@ -2189,7 +2247,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
// Rem in C can't be a floating point type: C99 6.5.5p2.
- if (CGF.SanOpts->IntegerDivideByZero) {
+ if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
@@ -2248,9 +2306,12 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
if (handlerName->empty()) {
// If the signed-integer-overflow sanitizer is enabled, emit a call to its
// runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
- if (!isSigned || CGF.SanOpts->SignedIntegerOverflow) {
+ if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
- EmitBinOpCheck(Builder.CreateNot(overflow), Ops);
+ llvm::Value *NotOverflow = Builder.CreateNot(overflow);
+ SanitizerKind Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
+ : SanitizerKind::UnsignedIntegerOverflow;
+ EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
} else
CGF.EmitTrapCheck(Builder.CreateNot(overflow));
return result;
@@ -2336,7 +2397,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
if (isSubtraction)
index = CGF.Builder.CreateNeg(index, "idx.neg");
- if (CGF.SanOpts->ArrayBounds)
+ if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
/*Accessed*/ false);
@@ -2476,7 +2537,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
case LangOptions::SOB_Defined:
return Builder.CreateAdd(op.LHS, op.RHS, "add");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
// Fall through.
case LangOptions::SOB_Trapping:
@@ -2484,7 +2545,8 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
}
}
- if (op.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow)
+ if (op.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
@@ -2506,7 +2568,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
case LangOptions::SOB_Defined:
return Builder.CreateSub(op.LHS, op.RHS, "sub");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
// Fall through.
case LangOptions::SOB_Trapping:
@@ -2514,7 +2576,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
}
}
- if (op.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow)
+ if (op.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
@@ -2601,7 +2664,7 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.SanOpts->Shift && !CGF.getLangOpts().OpenCL &&
+ if (CGF.SanOpts.has(SanitizerKind::Shift) && !CGF.getLangOpts().OpenCL &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, RHS);
@@ -2638,7 +2701,7 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
Valid = P;
}
- EmitBinOpCheck(Valid, Ops);
+ EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::Shift), Ops);
}
// OpenCL 6.3j: shift values are effectively % word size of LHS.
if (CGF.getLangOpts().OpenCL)
@@ -2654,10 +2717,12 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.SanOpts->Shift && !CGF.getLangOpts().OpenCL &&
+ if (CGF.SanOpts.has(SanitizerKind::Shift) && !CGF.getLangOpts().OpenCL &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
- EmitBinOpCheck(Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)), Ops);
+ llvm::Value *Valid =
+ Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
+ EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::Shift), Ops);
}
// OpenCL 6.3j: shift values are effectively % word size of LHS.
@@ -2708,6 +2773,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
TestAndClearIgnoreResultAssign();
Value *Result;
QualType LHSTy = E->getLHS()->getType();
+ QualType RHSTy = E->getRHS()->getType();
if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
assert(E->getOpcode() == BO_EQ ||
E->getOpcode() == BO_NE);
@@ -2715,7 +2781,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
Value *RHS = CGF.EmitScalarExpr(E->getRHS());
Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
- } else if (!LHSTy->isAnyComplexType()) {
+ } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
Value *LHS = Visit(E->getLHS());
Value *RHS = Visit(E->getRHS());
@@ -2803,10 +2869,28 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
} else {
// Complex Comparison: can only be an equality comparison.
- CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
- CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
-
- QualType CETy = LHSTy->getAs<ComplexType>()->getElementType();
+ CodeGenFunction::ComplexPairTy LHS, RHS;
+ QualType CETy;
+ if (auto *CTy = LHSTy->getAs<ComplexType>()) {
+ LHS = CGF.EmitComplexExpr(E->getLHS());
+ CETy = CTy->getElementType();
+ } else {
+ LHS.first = Visit(E->getLHS());
+ LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
+ CETy = LHSTy;
+ }
+ if (auto *CTy = RHSTy->getAs<ComplexType>()) {
+ RHS = CGF.EmitComplexExpr(E->getRHS());
+ assert(CGF.getContext().hasSameUnqualifiedType(CETy,
+ CTy->getElementType()) &&
+ "The element types must always match.");
+ (void)CTy;
+ } else {
+ RHS.first = Visit(E->getRHS());
+ RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
+ assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
+ "The element types must always match.");
+ }
Value *ResultR, *ResultI;
if (CETy->isRealFloatingType()) {
@@ -2959,7 +3043,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// Emit an unconditional branch from this block to ContBlock.
{
// There is no need to emit line number for unconditional branch.
- SuppressDebugLocation S(Builder);
+ ApplyDebugLocation DL(CGF);
CGF.EmitBlock(ContBlock);
}
// Insert an entry into the phi node for the edge with the value of RHSCond.
@@ -3232,8 +3316,12 @@ Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
llvm::Value *Val = Builder.CreateLoad(ArgPtr);
// If EmitVAArg promoted the type, we must truncate it.
- if (ArgTy != Val->getType())
- Val = Builder.CreateTrunc(Val, ArgTy);
+ if (ArgTy != Val->getType()) {
+ if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
+ Val = Builder.CreateIntToPtr(Val, ArgTy);
+ else
+ Val = Builder.CreateTrunc(Val, ArgTy);
+ }
return Val;
}
diff --git a/lib/CodeGen/CGLoopInfo.cpp b/lib/CodeGen/CGLoopInfo.cpp
index a273f1d4dda8..89f43c281590 100644
--- a/lib/CodeGen/CGLoopInfo.cpp
+++ b/lib/CodeGen/CGLoopInfo.cpp
@@ -24,40 +24,39 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs) {
Attrs.VectorizerEnable == LoopAttributes::VecUnspecified)
return nullptr;
- SmallVector<Value *, 4> Args;
+ SmallVector<Metadata *, 4> Args;
// Reserve operand 0 for loop id self reference.
MDNode *TempNode = MDNode::getTemporary(Ctx, None);
Args.push_back(TempNode);
// Setting vectorizer.width
if (Attrs.VectorizerWidth > 0) {
- Value *Vals[] = { MDString::get(Ctx, "llvm.loop.vectorize.width"),
- ConstantInt::get(Type::getInt32Ty(Ctx),
- Attrs.VectorizerWidth) };
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.vectorize.width"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), Attrs.VectorizerWidth))};
Args.push_back(MDNode::get(Ctx, Vals));
}
// Setting vectorizer.unroll
if (Attrs.VectorizerUnroll > 0) {
- Value *Vals[] = { MDString::get(Ctx, "llvm.loop.interleave.count"),
- ConstantInt::get(Type::getInt32Ty(Ctx),
- Attrs.VectorizerUnroll) };
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.interleave.count"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), Attrs.VectorizerUnroll))};
Args.push_back(MDNode::get(Ctx, Vals));
}
// Setting vectorizer.enable
if (Attrs.VectorizerEnable != LoopAttributes::VecUnspecified) {
- Value *Vals[] = { MDString::get(Ctx, "llvm.loop.vectorize.enable"),
- ConstantInt::get(Type::getInt1Ty(Ctx),
- (Attrs.VectorizerEnable ==
- LoopAttributes::VecEnable)) };
+ Metadata *Vals[] = {
+ MDString::get(Ctx, "llvm.loop.vectorize.enable"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt1Ty(Ctx),
+ (Attrs.VectorizerEnable == LoopAttributes::VecEnable)))};
Args.push_back(MDNode::get(Ctx, Vals));
}
- MDNode *LoopID = MDNode::get(Ctx, Args);
- assert(LoopID->use_empty() && "LoopID should not be used");
-
// Set the first operand to itself.
+ MDNode *LoopID = MDNode::get(Ctx, Args);
LoopID->replaceOperandWith(0, LoopID);
MDNode::deleteTemporary(TempNode);
return LoopID;
diff --git a/lib/CodeGen/CGLoopInfo.h b/lib/CodeGen/CGLoopInfo.h
index 2f6f172e047a..b1693996507e 100644
--- a/lib/CodeGen/CGLoopInfo.h
+++ b/lib/CodeGen/CGLoopInfo.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGLOOPINFO_H
-#define CLANG_CODEGEN_CGLOOPINFO_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGLOOPINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_CGLOOPINFO_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
@@ -133,4 +133,4 @@ private:
} // end namespace CodeGen
} // end namespace clang
-#endif // CLANG_CODEGEN_CGLOOPINFO_H
+#endif
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 8ca80808e007..34c6d94f8817 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -60,7 +60,6 @@ llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
// Generate the correct selector for this literal's concrete type.
- const Expr *SubExpr = E->getSubExpr();
// Get the method.
const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
assert(BoxingMethod && "BoxingMethod is null");
@@ -73,12 +72,9 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);
-
- const ParmVarDecl *argDecl = *BoxingMethod->param_begin();
- QualType ArgQT = argDecl->getType().getUnqualifiedType();
- RValue RV = EmitAnyExpr(SubExpr);
+
CallArgList Args;
- Args.add(RV, ArgQT);
+ EmitCallArgs(Args, BoxingMethod, E->arg_begin(), E->arg_end());
RValue result = Runtime.GenerateMessageSend(
*this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
@@ -461,8 +457,8 @@ struct FinishARCDealloc : EHScopeStack::Cleanup {
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
- const ObjCContainerDecl *CD,
- SourceLocation StartLoc) {
+ const ObjCContainerDecl *CD) {
+ SourceLocation StartLoc = OMD->getLocStart();
FunctionArgList args;
// Check if we should generate debug info for this method.
if (OMD->hasAttr<NoDebugAttr>())
@@ -480,6 +476,7 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
args.push_back(PI);
CurGD = OMD;
+ CurEHLocation = OMD->getLocEnd();
StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
OMD->getLocation(), StartLoc);
@@ -501,15 +498,13 @@ static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class struture.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
- StartObjCMethod(OMD, OMD->getClassInterface(), OMD->getLocStart());
+ StartObjCMethod(OMD, OMD->getClassInterface());
PGO.assignRegionCounters(OMD, CurFn);
assert(isa<CompoundStmt>(OMD->getBody()));
RegionCounter Cnt = getPGORegionCounter(OMD->getBody());
Cnt.beginRegion(Builder);
EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
FinishFunction(OMD->getBodyRBrace());
- PGO.emitInstrumentationData();
- PGO.destroyRegionCounters();
}
/// emitStructGetterCall - Call the runtime function to load a property
@@ -744,12 +739,12 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
- llvm::Constant *AtomicHelperFn =
- GenerateObjCAtomicGetterCopyHelperFunction(PID);
+ llvm::Constant *AtomicHelperFn =
+ CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
assert(OMD && "Invalid call to generate getter (empty method)");
- StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
+ StartObjCMethod(OMD, IMP->getClassInterface());
generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);
@@ -1273,12 +1268,12 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
- llvm::Constant *AtomicHelperFn =
- GenerateObjCAtomicSetterCopyHelperFunction(PID);
+ llvm::Constant *AtomicHelperFn =
+ CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
assert(OMD && "Invalid call to generate setter (empty method)");
- StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
+ StartObjCMethod(OMD, IMP->getClassInterface());
generateObjCSetterBody(IMP, PID, AtomicHelperFn);
@@ -1356,7 +1351,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD,
bool ctor) {
MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
- StartObjCMethod(MD, IMP->getClassInterface(), MD->getLocStart());
+ StartObjCMethod(MD, IMP->getClassInterface());
// Emit .cxx_construct.
if (ctor) {
@@ -1757,7 +1752,7 @@ void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
llvm::Constant *&fn = CGM.getARCEntrypoints().clang_arc_use;
if (!fn) {
llvm::FunctionType *fnType =
- llvm::FunctionType::get(CGM.VoidTy, ArrayRef<llvm::Type*>(), true);
+ llvm::FunctionType::get(CGM.VoidTy, None, true);
fn = CGM.CreateRuntimeFunction(fnType, "clang.arc.use");
}
@@ -1940,9 +1935,8 @@ llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
= cast<llvm::CallInst>(result->stripPointerCasts());
assert(call->getCalledValue() == CGM.getARCEntrypoints().objc_retainBlock);
- SmallVector<llvm::Value*,1> args;
call->setMetadata("clang.arc.copy_on_escape",
- llvm::MDNode::get(Builder.getContext(), args));
+ llvm::MDNode::get(Builder.getContext(), None));
}
return result;
@@ -1984,8 +1978,8 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
"clang.arc.retainAutoreleasedReturnValueMarker");
assert(metadata->getNumOperands() <= 1);
if (metadata->getNumOperands() == 0) {
- llvm::Value *string = llvm::MDString::get(getLLVMContext(), assembly);
- metadata->addOperand(llvm::MDNode::get(getLLVMContext(), string));
+ metadata->addOperand(llvm::MDNode::get(
+ getLLVMContext(), llvm::MDString::get(getLLVMContext(), assembly)));
}
}
}
@@ -2018,9 +2012,8 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value,
llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);
if (precise == ARCImpreciseLifetime) {
- SmallVector<llvm::Value*,1> args;
call->setMetadata("clang.imprecise_release",
- llvm::MDNode::get(Builder.getContext(), args));
+ llvm::MDNode::get(Builder.getContext(), None));
}
}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 619a66ab4a69..c0dc3b8002d8 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -58,7 +58,7 @@ class LazyRuntimeFunction {
/// Initialises the lazy function with the name, return type, and the types
/// of the arguments.
- END_WITH_NULL
+ LLVM_END_WITH_NULL
void init(CodeGenModule *Mod, const char *name,
llvm::Type *RetTy, ...) {
CGM =Mod;
@@ -391,8 +391,8 @@ private:
///
/// This structure is used by both classes and categories, and contains a next
/// pointer allowing them to be chained together in a linked list.
- llvm::Constant *GenerateMethodList(const StringRef &ClassName,
- const StringRef &CategoryName,
+ llvm::Constant *GenerateMethodList(StringRef ClassName,
+ StringRef CategoryName,
ArrayRef<Selector> MethodSels,
ArrayRef<llvm::Constant *> MethodTypes,
bool isClassMethodList);
@@ -875,8 +875,8 @@ void CGObjCGNU::EmitClassRef(const std::string &className) {
llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
}
-static std::string SymbolNameForMethod(const StringRef &ClassName,
- const StringRef &CategoryName, const Selector MethodName,
+static std::string SymbolNameForMethod( StringRef ClassName,
+ StringRef CategoryName, const Selector MethodName,
bool isClassMethod) {
std::string MethodNameColonStripped = MethodName.getAsString();
std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
@@ -1296,11 +1296,11 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd, MSI);
imp = EnforceType(Builder, imp, MSI.MessengerType);
- llvm::Value *impMD[] = {
+ llvm::Metadata *impMD[] = {
llvm::MDString::get(VMContext, Sel.getAsString()),
llvm::MDString::get(VMContext, Class->getSuperClass()->getNameAsString()),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsClassMessage)
- };
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt1Ty(VMContext), IsClassMessage))};
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
llvm::Instruction *call;
@@ -1371,12 +1371,11 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
cmd = EnforceType(Builder, cmd, SelectorTy);
Receiver = EnforceType(Builder, Receiver, IdTy);
- llvm::Value *impMD[] = {
- llvm::MDString::get(VMContext, Sel.getAsString()),
- llvm::MDString::get(VMContext, Class ? Class->getNameAsString() :""),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
- Class!=nullptr)
- };
+ llvm::Metadata *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class ? Class->getNameAsString() : ""),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt1Ty(VMContext), Class != nullptr))};
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
CallArgList ActualArgs;
@@ -1463,8 +1462,8 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
/// Generates a MethodList. Used in construction of a objc_class and
/// objc_category structures.
llvm::Constant *CGObjCGNU::
-GenerateMethodList(const StringRef &ClassName,
- const StringRef &CategoryName,
+GenerateMethodList(StringRef ClassName,
+ StringRef CategoryName,
ArrayRef<Selector> MethodSels,
ArrayRef<llvm::Constant *> MethodTypes,
bool isClassMethodList) {
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 6f0979d06c53..f91e8e15b039 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -106,7 +106,7 @@ private:
llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
llvm::Type *longDoubleType = llvm::Type::getX86_FP80Ty(VMContext);
llvm::Type *resultType =
- llvm::StructType::get(longDoubleType, longDoubleType, NULL);
+ llvm::StructType::get(longDoubleType, longDoubleType, nullptr);
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(resultType,
params, true),
@@ -244,9 +244,9 @@ public:
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(IdType, false, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ IdType, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
}
@@ -264,10 +264,9 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
- Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
}
@@ -291,10 +290,9 @@ public:
Params.push_back(IdType);
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
- Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
const char *name;
if (atomic && copy)
name = "objc_setProperty_atomic_copy";
@@ -319,10 +317,9 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
- Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
}
@@ -339,7 +336,7 @@ public:
Params.push_back(Ctx.VoidPtrTy);
Params.push_back(Ctx.VoidPtrTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false, false,
Params,
FunctionType::ExtInfo(),
RequiredArgs::All));
@@ -353,10 +350,9 @@ public:
SmallVector<CanQualType,1> Params;
Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
- Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
@@ -2446,11 +2442,11 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
printf("\n");
}
}
-
- llvm::GlobalVariable * Entry =
- CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::getString(VMContext, BitMap,false),
- "__TEXT,__objc_classname,cstring_literals", 1, true);
+
+ llvm::GlobalVariable *Entry = CreateMetadataVar(
+ "OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, BitMap, false),
+ "__TEXT,__objc_classname,cstring_literals", 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -2553,14 +2549,6 @@ llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
return GetOrEmitProtocolRef(PD);
}
-static void assertPrivateName(const llvm::GlobalValue *GV) {
- StringRef NameRef = GV->getName();
- (void)NameRef;
- assert(NameRef[0] == '\01' && (NameRef[1] == 'L' || NameRef[1] == 'l'));
- assert(GV->getVisibility() == llvm::GlobalValue::DefaultVisibility);
- assert(GV->hasPrivateLinkage());
-}
-
/*
// Objective-C 1.0 extensions
struct _objc_protocol {
@@ -2624,19 +2612,17 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
OptMethodTypesExt.begin(), OptMethodTypesExt.end());
llvm::Constant *Values[] = {
- EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods,
- MethodTypesExt),
- GetClassName(PD->getObjCRuntimeNameAsString()),
- EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getName(),
- PD->protocol_begin(),
- PD->protocol_end()),
- EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- InstanceMethods),
- EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- ClassMethods)
- };
+ EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods,
+ MethodTypesExt),
+ GetClassName(PD->getObjCRuntimeNameAsString()),
+ EmitProtocolList("OBJC_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(), PD->protocol_end()),
+ EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods),
+ EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods)};
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
Values);
@@ -2645,18 +2631,15 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
assert(Entry->hasPrivateLinkage());
Entry->setInitializer(Init);
} else {
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
- llvm::GlobalValue::PrivateLinkage,
- Init,
- "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ Init, "OBJC_PROTOCOL_" + PD->getName());
Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
// FIXME: Is this necessary? Why only for protocol?
Entry->setAlignment(4);
Protocols[PD->getIdentifier()] = Entry;
}
- assertPrivateName(Entry);
CGM.addCompilerUsedGlobal(Entry);
return Entry;
@@ -2669,16 +2652,13 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
// We use the initializer as a marker of whether this is a forward
// reference or not. At module finalization we add the empty
// contents for protocols which were referenced but never defined.
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
- llvm::GlobalValue::PrivateLinkage,
- nullptr,
- "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ nullptr, "OBJC_PROTOCOL_" + PD->getName());
Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
// FIXME: Is this necessary? Why only for protocol?
Entry->setAlignment(4);
}
- assertPrivateName(Entry);
return Entry;
}
@@ -2700,19 +2680,17 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
uint64_t Size =
CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
llvm::Constant *Values[] = {
- llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
- EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
- + PD->getName(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- OptInstanceMethods),
- EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- OptClassMethods),
- EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" + PD->getName(), nullptr,
- PD, ObjCTypes),
- EmitProtocolMethodTypes("\01L_OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
- MethodTypesExt, ObjCTypes)
- };
+ llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
+ EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_OPT_" + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ OptInstanceMethods),
+ EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ OptClassMethods),
+ EmitPropertyList("OBJC_$_PROP_PROTO_LIST_" + PD->getName(), nullptr, PD,
+ ObjCTypes),
+ EmitProtocolMethodTypes("OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
+ MethodTypesExt, ObjCTypes)};
// Return null if no extension bits are used.
if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
@@ -2776,7 +2754,7 @@ PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*,16> &PropertySet,
for (const auto *P : Proto->protocols())
PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes);
for (const auto *PD : Proto->properties()) {
- if (!PropertySet.insert(PD->getIdentifier()))
+ if (!PropertySet.insert(PD->getIdentifier()).second)
continue;
llvm::Constant *Prop[] = {
GetPropertyName(PD->getIdentifier()),
@@ -2941,19 +2919,16 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Values[0] = GetClassName(OCD->getName());
Values[1] = GetClassName(Interface->getObjCRuntimeNameAsString());
LazySymbols.insert(Interface->getIdentifier());
- Values[2] =
- EmitMethodList("\01L_OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- InstanceMethods);
- Values[3] =
- EmitMethodList("\01L_OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- ClassMethods);
+ Values[2] = EmitMethodList("OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[3] = EmitMethodList("OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
if (Category) {
Values[4] =
- EmitProtocolList("\01L_OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
- Category->protocol_begin(),
- Category->protocol_end());
+ EmitProtocolList("OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
+ Category->protocol_begin(), Category->protocol_end());
} else {
Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
}
@@ -2971,9 +2946,8 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Values);
llvm::GlobalVariable *GV =
- CreateMetadataVar("\01L_OBJC_CATEGORY_" + ExtName.str(), Init,
- "__OBJC,__category,regular,no_dead_strip",
- 4, true);
+ CreateMetadataVar("OBJC_CATEGORY_" + ExtName.str(), Init,
+ "__OBJC,__category,regular,no_dead_strip", 4, true);
DefinedCategories.push_back(GV);
DefinedCategoryNames.insert(ExtName.str());
// method definition entries must be clear for next implementation.
@@ -3040,9 +3014,9 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
ObjCInterfaceDecl *Interface =
const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
llvm::Constant *Protocols =
- EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getName(),
- Interface->all_referenced_protocol_begin(),
- Interface->all_referenced_protocol_end());
+ EmitProtocolList("OBJC_CLASS_PROTOCOLS_" + ID->getName(),
+ Interface->all_referenced_protocol_begin(),
+ Interface->all_referenced_protocol_end());
unsigned Flags = FragileABI_Class_Factory;
if (ID->hasNonZeroConstructors() || ID->hasDestructors())
Flags |= FragileABI_Class_HasCXXStructors;
@@ -3093,10 +3067,9 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
Values[ 6] = EmitIvarList(ID, false);
- Values[ 7] =
- EmitMethodList("\01L_OBJC_INSTANCE_METHODS_" + ID->getName(),
- "__OBJC,__inst_meth,regular,no_dead_strip",
- InstanceMethods);
+ Values[7] = EmitMethodList("OBJC_INSTANCE_METHODS_" + ID->getName(),
+ "__OBJC,__inst_meth,regular,no_dead_strip",
+ InstanceMethods);
// cache is always NULL.
Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
Values[ 9] = Protocols;
@@ -3104,7 +3077,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
Values[11] = EmitClassExtension(ID);
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
Values);
- std::string Name("\01L_OBJC_CLASS_");
+ std::string Name("OBJC_CLASS_");
Name += ClassName;
const char *Section = "__OBJC,__class,regular,no_dead_strip";
// Check for a forward reference.
@@ -3118,7 +3091,6 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
CGM.addCompilerUsedGlobal(GV);
} else
GV = CreateMetadataVar(Name, Init, Section, 4, true);
- assertPrivateName(GV);
DefinedClasses.push_back(GV);
ImplementedClasses.push_back(Interface);
// method definition entries must be clear for next implementation.
@@ -3158,10 +3130,9 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
Values[ 6] = EmitIvarList(ID, true);
- Values[ 7] =
- EmitMethodList("\01L_OBJC_CLASS_METHODS_" + ID->getNameAsString(),
- "__OBJC,__cls_meth,regular,no_dead_strip",
- Methods);
+ Values[7] =
+ EmitMethodList("OBJC_CLASS_METHODS_" + ID->getNameAsString(),
+ "__OBJC,__cls_meth,regular,no_dead_strip", Methods);
// cache is always NULL.
Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
Values[ 9] = Protocols;
@@ -3172,7 +3143,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
Values);
- std::string Name("\01L_OBJC_METACLASS_");
+ std::string Name("OBJC_METACLASS_");
Name += ID->getName();
// Check for a forward reference.
@@ -3186,7 +3157,6 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::GlobalValue::PrivateLinkage,
Init, Name);
}
- assertPrivateName(GV);
GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
GV->setAlignment(4);
CGM.addCompilerUsedGlobal(GV);
@@ -3195,7 +3165,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
}
llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
- std::string Name = "\01L_OBJC_METACLASS_" + ID->getNameAsString();
+ std::string Name = "OBJC_METACLASS_" + ID->getNameAsString();
// FIXME: Should we look these up somewhere other than the module. Its a bit
// silly since we only generate these while processing an implementation, so
@@ -3213,12 +3183,11 @@ llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
- assertPrivateName(GV);
return GV;
}
llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
- std::string Name = "\01L_OBJC_CLASS_" + ID->getNameAsString();
+ std::string Name = "OBJC_CLASS_" + ID->getNameAsString();
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
if (!GV)
@@ -3228,7 +3197,6 @@ llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
"Forward class metadata reference has incorrect type.");
- assertPrivateName(GV);
return GV;
}
@@ -3256,9 +3224,8 @@ CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
llvm::Constant *Init =
llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
- return CreateMetadataVar("\01L_OBJC_CLASSEXT_" + ID->getName(),
- Init, "__OBJC,__class_ext,regular,no_dead_strip",
- 4, true);
+ return CreateMetadataVar("OBJC_CLASSEXT_" + ID->getName(), Init,
+ "__OBJC,__class_ext,regular,no_dead_strip", 4, true);
}
/*
@@ -3314,13 +3281,13 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
llvm::GlobalVariable *GV;
if (ForClass)
- GV = CreateMetadataVar("\01L_OBJC_CLASS_VARIABLES_" + ID->getName(),
- Init, "__OBJC,__class_vars,regular,no_dead_strip",
- 4, true);
+ GV =
+ CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), Init,
+ "__OBJC,__class_vars,regular,no_dead_strip", 4, true);
else
- GV = CreateMetadataVar("\01L_OBJC_INSTANCE_VARIABLES_" + ID->getName(),
- Init, "__OBJC,__instance_vars,regular,no_dead_strip",
- 4, true);
+ GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), Init,
+ "__OBJC,__instance_vars,regular,no_dead_strip", 4,
+ true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
}
@@ -3401,7 +3368,6 @@ llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Ty, false,
llvm::GlobalValue::PrivateLinkage, Init, Name);
- assertPrivateName(GV);
if (!Section.empty())
GV->setSection(Section);
if (Align)
@@ -4308,11 +4274,10 @@ void CGObjCCommonMac::EmitImageInfo() {
eImageInfo_GCOnly);
// Require that GC be specified and set to eImageInfo_GarbageCollected.
- llvm::Value *Ops[2] = {
- llvm::MDString::get(VMContext, "Objective-C Garbage Collection"),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- eImageInfo_GarbageCollected)
- };
+ llvm::Metadata *Ops[2] = {
+ llvm::MDString::get(VMContext, "Objective-C Garbage Collection"),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), eImageInfo_GarbageCollected))};
Mod.addModuleFlag(llvm::Module::Require, "Objective-C GC Only",
llvm::MDNode::get(VMContext, Ops));
}
@@ -4347,10 +4312,9 @@ void CGObjCMac::EmitModuleInfo() {
GetClassName(StringRef("")),
EmitModuleSymbols()
};
- CreateMetadataVar("\01L_OBJC_MODULES",
+ CreateMetadataVar("OBJC_MODULES",
llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
- "__OBJC,__module_info,regular,no_dead_strip",
- 4, true);
+ "__OBJC,__module_info,regular,no_dead_strip", 4, true);
}
llvm::Constant *CGObjCMac::EmitModuleSymbols() {
@@ -4393,10 +4357,8 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
- llvm::GlobalVariable *GV =
- CreateMetadataVar("\01L_OBJC_SYMBOLS", Init,
- "__OBJC,__symbols,regular,no_dead_strip",
- 4, true);
+ llvm::GlobalVariable *GV = CreateMetadataVar(
+ "OBJC_SYMBOLS", Init, "__OBJC,__symbols,regular,no_dead_strip", 4, true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
}
@@ -4410,10 +4372,9 @@ llvm::Value *CGObjCMac::EmitClassRefFromId(CodeGenFunction &CGF,
llvm::Constant *Casted =
llvm::ConstantExpr::getBitCast(GetClassName(II->getName()),
ObjCTypes.ClassPtrTy);
- Entry =
- CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
- "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
- 4, true);
+ Entry = CreateMetadataVar(
+ "OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip", 4, true);
}
return CGF.Builder.CreateLoad(Entry);
@@ -4437,10 +4398,9 @@ llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel,
llvm::Constant *Casted =
llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
ObjCTypes.SelectorPtrTy);
- Entry =
- CreateMetadataVar("\01L_OBJC_SELECTOR_REFERENCES_", Casted,
- "__OBJC,__message_refs,literal_pointers,no_dead_strip",
- 4, true);
+ Entry = CreateMetadataVar(
+ "OBJC_SELECTOR_REFERENCES_", Casted,
+ "__OBJC,__message_refs,literal_pointers,no_dead_strip", 4, true);
Entry->setExternallyInitialized(true);
}
@@ -4452,13 +4412,12 @@ llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel,
llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
llvm::GlobalVariable *&Entry = ClassNames[RuntimeName];
if (!Entry)
- Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::getString(VMContext,
- RuntimeName),
- ((ObjCABI == 2) ?
- "__TEXT,__objc_classname,cstring_literals" :
- "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ Entry = CreateMetadataVar(
+ "OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, RuntimeName),
+ ((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4772,14 +4731,13 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) {
// null terminate string.
unsigned char zero = 0;
BitMap += zero;
-
- llvm::GlobalVariable * Entry =
- CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::getString(VMContext, BitMap,false),
- ((ObjCABI == 2) ?
- "__TEXT,__objc_classname,cstring_literals" :
- "__TEXT,__cstring,cstring_literals"),
- 1, true);
+
+ llvm::GlobalVariable *Entry = CreateMetadataVar(
+ "OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, BitMap, false),
+ ((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4864,12 +4822,12 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
// FIXME: Avoid std::string in "Sel.getAsString()"
if (!Entry)
- Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_",
- llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
- ((ObjCABI == 2) ?
- "__TEXT,__objc_methname,cstring_literals" :
- "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ Entry = CreateMetadataVar(
+ "OBJC_METH_VAR_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
+ ((ObjCABI == 2) ? "__TEXT,__objc_methname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4886,12 +4844,12 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
if (!Entry)
- Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
- llvm::ConstantDataArray::getString(VMContext, TypeStr),
- ((ObjCABI == 2) ?
- "__TEXT,__objc_methtype,cstring_literals" :
- "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ Entry = CreateMetadataVar(
+ "OBJC_METH_VAR_TYPE_",
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
+ ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4905,12 +4863,12 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D,
llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
if (!Entry)
- Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
- llvm::ConstantDataArray::getString(VMContext, TypeStr),
- ((ObjCABI == 2) ?
- "__TEXT,__objc_methtype,cstring_literals" :
- "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ Entry = CreateMetadataVar(
+ "OBJC_METH_VAR_TYPE_",
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
+ ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4921,7 +4879,7 @@ llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
if (!Entry)
Entry = CreateMetadataVar(
- "\01L_OBJC_PROP_NAME_ATTR_",
+ "OBJC_PROP_NAME_ATTR_",
llvm::ConstantDataArray::getString(VMContext, Ident->getName()),
"__TEXT,__cstring,cstring_literals", 1, true);
@@ -4967,7 +4925,6 @@ void CGObjCMac::FinishModule() {
Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
Values[3] = Values[4] =
llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
- assertPrivateName(I->second);
I->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
Values));
CGM.addCompilerUsedGlobal(I->second);
@@ -5027,8 +4984,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// arm64 targets use "int" ivar offset variables. All others,
// including OS X x86_64 and Windows x86_64, use "long" ivar offsets.
- if (CGM.getTarget().getTriple().getArch() == llvm::Triple::arm64 ||
- CGM.getTarget().getTriple().getArch() == llvm::Triple::aarch64)
+ if (CGM.getTarget().getTriple().getArch() == llvm::Triple::aarch64)
IvarOffsetVarTy = IntTy;
else
IvarOffsetVarTy = LongTy;
@@ -5072,7 +5028,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// char *attributes;
// }
PropertyTy = llvm::StructType::create("struct._prop_t",
- Int8PtrTy, Int8PtrTy, NULL);
+ Int8PtrTy, Int8PtrTy, nullptr);
// struct _prop_list_t {
// uint32_t entsize; // sizeof(struct _prop_t)
@@ -5081,7 +5037,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// }
PropertyListTy =
llvm::StructType::create("struct._prop_list_t", IntTy, IntTy,
- llvm::ArrayType::get(PropertyTy, 0), NULL);
+ llvm::ArrayType::get(PropertyTy, 0), nullptr);
// struct _prop_list_t *
PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
@@ -5092,7 +5048,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// }
MethodTy = llvm::StructType::create("struct._objc_method",
SelectorPtrTy, Int8PtrTy, Int8PtrTy,
- NULL);
+ nullptr);
// struct _objc_cache *
CacheTy = llvm::StructType::create(VMContext, "struct._objc_cache");
@@ -5108,16 +5064,15 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// }
MethodDescriptionTy =
llvm::StructType::create("struct._objc_method_description",
- SelectorPtrTy, Int8PtrTy, NULL);
+ SelectorPtrTy, Int8PtrTy, nullptr);
// struct _objc_method_description_list {
// int count;
// struct _objc_method_description[1];
// }
- MethodDescriptionListTy =
- llvm::StructType::create("struct._objc_method_description_list",
- IntTy,
- llvm::ArrayType::get(MethodDescriptionTy, 0),NULL);
+ MethodDescriptionListTy = llvm::StructType::create(
+ "struct._objc_method_description_list", IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0), nullptr);
// struct _objc_method_description_list *
MethodDescriptionListPtrTy =
@@ -5136,7 +5091,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
llvm::StructType::create("struct._objc_protocol_extension",
IntTy, MethodDescriptionListPtrTy,
MethodDescriptionListPtrTy, PropertyListPtrTy,
- Int8PtrPtrTy, NULL);
+ Int8PtrPtrTy, nullptr);
// struct _objc_protocol_extension *
ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
@@ -5151,7 +5106,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
ProtocolListTy->setBody(llvm::PointerType::getUnqual(ProtocolListTy),
LongTy,
llvm::ArrayType::get(ProtocolTy, 0),
- NULL);
+ nullptr);
// struct _objc_protocol {
// struct _objc_protocol_extension *isa;
@@ -5164,7 +5119,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
llvm::PointerType::getUnqual(ProtocolListTy),
MethodDescriptionListPtrTy,
MethodDescriptionListPtrTy,
- NULL);
+ nullptr);
// struct _objc_protocol_list *
ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
@@ -5179,7 +5134,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// int ivar_offset;
// }
IvarTy = llvm::StructType::create("struct._objc_ivar",
- Int8PtrTy, Int8PtrTy, IntTy, NULL);
+ Int8PtrTy, Int8PtrTy, IntTy, nullptr);
// struct _objc_ivar_list *
IvarListTy =
@@ -5194,7 +5149,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_class_extension *
ClassExtensionTy =
llvm::StructType::create("struct._objc_class_extension",
- IntTy, Int8PtrTy, PropertyListPtrTy, NULL);
+ IntTy, Int8PtrTy, PropertyListPtrTy, nullptr);
ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
ClassTy = llvm::StructType::create(VMContext, "struct._objc_class");
@@ -5225,7 +5180,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
ProtocolListPtrTy,
Int8PtrTy,
ClassExtensionPtrTy,
- NULL);
+ nullptr);
ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
@@ -5241,7 +5196,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
llvm::StructType::create("struct._objc_category",
Int8PtrTy, Int8PtrTy, MethodListPtrTy,
MethodListPtrTy, ProtocolListPtrTy,
- IntTy, PropertyListPtrTy, NULL);
+ IntTy, PropertyListPtrTy, nullptr);
// Global metadata structures
@@ -5255,7 +5210,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
SymtabTy =
llvm::StructType::create("struct._objc_symtab",
LongTy, SelectorPtrTy, ShortTy, ShortTy,
- llvm::ArrayType::get(Int8PtrTy, 0), NULL);
+ llvm::ArrayType::get(Int8PtrTy, 0), nullptr);
SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
// struct _objc_module {
@@ -5266,7 +5221,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// }
ModuleTy =
llvm::StructType::create("struct._objc_module",
- LongTy, LongTy, Int8PtrTy, SymtabPtrTy, NULL);
+ LongTy, LongTy, Int8PtrTy, SymtabPtrTy, nullptr);
// FIXME: This is the size of the setjmp buffer and should be target
@@ -5279,7 +5234,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
ExceptionDataTy =
llvm::StructType::create("struct._objc_exception_data",
llvm::ArrayType::get(CGM.Int32Ty,SetJmpBufferSize),
- StackPtrTy, NULL);
+ StackPtrTy, nullptr);
}
@@ -5292,7 +5247,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
MethodListnfABITy =
llvm::StructType::create("struct.__method_list_t", IntTy, IntTy,
- llvm::ArrayType::get(MethodTy, 0), NULL);
+ llvm::ArrayType::get(MethodTy, 0), nullptr);
// struct method_list_t *
MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
@@ -5320,7 +5275,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
MethodListnfABIPtrTy, MethodListnfABIPtrTy,
MethodListnfABIPtrTy, MethodListnfABIPtrTy,
PropertyListPtrTy, IntTy, IntTy, Int8PtrPtrTy,
- NULL);
+ nullptr);
// struct _protocol_t*
ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
@@ -5331,7 +5286,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
ProtocolListnfABITy->setBody(LongTy,
llvm::ArrayType::get(ProtocolnfABIPtrTy, 0),
- NULL);
+ nullptr);
// struct _objc_protocol_list*
ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
@@ -5345,7 +5300,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
IvarnfABITy = llvm::StructType::create(
"struct._ivar_t", llvm::PointerType::getUnqual(IvarOffsetVarTy),
- Int8PtrTy, Int8PtrTy, IntTy, IntTy, NULL);
+ Int8PtrTy, Int8PtrTy, IntTy, IntTy, nullptr);
// struct _ivar_list_t {
// uint32 entsize; // sizeof(struct _ivar_t)
@@ -5354,7 +5309,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
IvarListnfABITy =
llvm::StructType::create("struct._ivar_list_t", IntTy, IntTy,
- llvm::ArrayType::get(IvarnfABITy, 0), NULL);
+ llvm::ArrayType::get(IvarnfABITy, 0), nullptr);
IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
@@ -5378,7 +5333,8 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
Int8PtrTy, MethodListnfABIPtrTy,
ProtocolListnfABIPtrTy,
IvarListnfABIPtrTy,
- Int8PtrTy, PropertyListPtrTy, NULL);
+ Int8PtrTy, PropertyListPtrTy,
+ nullptr);
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
@@ -5399,7 +5355,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
CachePtrTy,
llvm::PointerType::getUnqual(ImpnfABITy),
llvm::PointerType::getUnqual(ClassRonfABITy),
- NULL);
+ nullptr);
// LLVM for struct _class_t *
ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);
@@ -5418,7 +5374,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
MethodListnfABIPtrTy,
ProtocolListnfABIPtrTy,
PropertyListPtrTy,
- NULL);
+ nullptr);
// New types for nonfragile abi messaging.
CodeGen::CodeGenTypes &Types = CGM.getTypes();
@@ -5457,7 +5413,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// };
SuperMessageRefTy =
llvm::StructType::create("struct._super_message_ref_t",
- ImpnfABITy, SelectorPtrTy, NULL);
+ ImpnfABITy, SelectorPtrTy, nullptr);
// SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
@@ -5471,7 +5427,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
EHTypeTy =
llvm::StructType::create("struct._objc_typeinfo",
llvm::PointerType::getUnqual(Int8PtrTy),
- Int8PtrTy, ClassnfABIPtrTy, NULL);
+ Int8PtrTy, ClassnfABIPtrTy, nullptr);
EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
}
@@ -5504,7 +5460,6 @@ AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
llvm::GlobalValue::PrivateLinkage,
Init,
SymbolName);
- assertPrivateName(GV);
GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection(SectionName);
CGM.addCompilerUsedGlobal(GV);
@@ -5526,22 +5481,18 @@ void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
DefinedMetaClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
}
}
-
- AddModuleClassList(DefinedClasses,
- "\01L_OBJC_LABEL_CLASS_$",
+
+ AddModuleClassList(DefinedClasses, "OBJC_LABEL_CLASS_$",
"__DATA, __objc_classlist, regular, no_dead_strip");
- AddModuleClassList(DefinedNonLazyClasses,
- "\01L_OBJC_LABEL_NONLAZY_CLASS_$",
+ AddModuleClassList(DefinedNonLazyClasses, "OBJC_LABEL_NONLAZY_CLASS_$",
"__DATA, __objc_nlclslist, regular, no_dead_strip");
// Build list of all implemented category addresses in array
// L_OBJC_LABEL_CATEGORY_$.
- AddModuleClassList(DefinedCategories,
- "\01L_OBJC_LABEL_CATEGORY_$",
+ AddModuleClassList(DefinedCategories, "OBJC_LABEL_CATEGORY_$",
"__DATA, __objc_catlist, regular, no_dead_strip");
- AddModuleClassList(DefinedNonLazyCategories,
- "\01L_OBJC_LABEL_NONLAZY_CATEGORY_$",
+ AddModuleClassList(DefinedNonLazyCategories, "OBJC_LABEL_NONLAZY_CATEGORY_$",
"__DATA, __objc_nlcatlist, regular, no_dead_strip");
EmitImageInfo();
@@ -5701,7 +5652,6 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
(flags & NonFragileABI_Class_Meta) ?
std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
- assertPrivateName(CLASS_RO_GV);
CLASS_RO_GV->setAlignment(
CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
CLASS_RO_GV->setSection("__DATA, __objc_const");
@@ -6038,7 +5988,6 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
llvm::GlobalValue::PrivateLinkage,
Init,
ExtCatName.str());
- assertPrivateName(GCATV);
GCATV->setAlignment(
CGM.getDataLayout().getABITypeAlignment(ObjCTypes.CategorynfABITy));
GCATV->setSection("__DATA, __objc_const");
@@ -6099,7 +6048,6 @@ CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
llvm::GlobalValue::PrivateLinkage, Init, Name);
- assertPrivateName(GV);
GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection(Section);
CGM.addCompilerUsedGlobal(GV);
@@ -6218,7 +6166,6 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
llvm::GlobalValue::PrivateLinkage,
Init,
Prefix + OID->getObjCRuntimeNameAsString());
- assertPrivateName(GV);
GV->setAlignment(
CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection("__DATA, __objc_const");
@@ -6237,7 +6184,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
// contents for protocols which were referenced but never defined.
Entry =
new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
- false, llvm::GlobalValue::WeakAnyLinkage,
+ false, llvm::GlobalValue::ExternalLinkage,
nullptr,
"\01l_OBJC_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString());
Entry->setSection("__DATA,__datacoal_nt,coalesced");
@@ -6348,8 +6295,8 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
Values);
if (Entry) {
- // Already created, update the initializer.
- assert(Entry->hasWeakAnyLinkage());
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
Entry->setInitializer(Init);
} else {
Entry =
@@ -6424,7 +6371,6 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
llvm::GlobalValue::PrivateLinkage,
Init, Name);
- assertPrivateName(GV);
GV->setSection("__DATA, __objc_const");
GV->setAlignment(
CGM.getDataLayout().getABITypeAlignment(Init->getType()));
@@ -6482,7 +6428,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
cast<llvm::LoadInst>(IvarOffsetValue)
->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
- llvm::MDNode::get(VMContext, ArrayRef<llvm::Value *>()));
+ llvm::MDNode::get(VMContext, None));
// This could be 32bit int or 64bit integer depending on the architecture.
// Cast it to 64bit integer value, if it is a 32bit integer ivar offset value
@@ -6673,18 +6619,15 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
getClassSymbolPrefix() +
(ID ? ID->getObjCRuntimeNameAsString() : II->getName()).str());
llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName, Weak);
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
- false, llvm::GlobalValue::PrivateLinkage,
- ClassGV,
- "\01L_OBJC_CLASSLIST_REFERENCES_$_");
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ ClassGV, "OBJC_CLASSLIST_REFERENCES_$_");
Entry->setAlignment(
CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- assertPrivateName(Entry);
return CGF.Builder.CreateLoad(Entry);
}
@@ -6709,18 +6652,15 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
ClassName += ID->getObjCRuntimeNameAsString();
llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName.str(),
ID->isWeakImported());
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
- false, llvm::GlobalValue::PrivateLinkage,
- ClassGV,
- "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ ClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(
CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- assertPrivateName(Entry);
return CGF.Builder.CreateLoad(Entry);
}
@@ -6736,11 +6676,10 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
MetaClassName += ID->getObjCRuntimeNameAsString();
llvm::GlobalVariable *MetaClassGV =
GetClassGlobal(MetaClassName.str(), Weak);
-
+
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
- MetaClassGV,
- "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ MetaClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(
CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABIPtrTy));
@@ -6748,7 +6687,6 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
CGM.addCompilerUsedGlobal(Entry);
}
- assertPrivateName(Entry);
return CGF.Builder.CreateLoad(Entry);
}
@@ -6795,8 +6733,7 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// If this is a class message the metaclass is passed as the target.
llvm::Value *Target;
if (IsClassMessage)
- Target = EmitMetaClassRef(CGF, Class,
- (isCategoryImpl && Class->isWeakImported()));
+ Target = EmitMetaClassRef(CGF, Class, Class->isWeakImported());
else
Target = EmitSuperClassRef(CGF, Class);
@@ -6826,23 +6763,20 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF,
llvm::Constant *Casted =
llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
ObjCTypes.SelectorPtrTy);
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy, false,
- llvm::GlobalValue::PrivateLinkage,
- Casted, "\01L_OBJC_SELECTOR_REFERENCES_");
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ Casted, "OBJC_SELECTOR_REFERENCES_");
Entry->setExternallyInitialized(true);
Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- assertPrivateName(Entry);
if (lval)
return Entry;
llvm::LoadInst* LI = CGF.Builder.CreateLoad(Entry);
LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
- llvm::MDNode::get(VMContext,
- ArrayRef<llvm::Value*>()));
+ llvm::MDNode::get(VMContext, None));
return LI;
}
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
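(Note on the CGObjCMac.cpp hunks above: they consistently drop the hard-coded "\01L_..." spellings and the assertPrivateName() checks. The globals keep private linkage, and the target mangler is apparently left to add the platform's private-label prefix itself, so call sites now pass only the logical name. A minimal sketch of the convention this relies on, with M, Ty and Init standing in for the usual module, type and initializer:

  // '\01' at the start of an LLVM symbol name means "use this name verbatim";
  // with plain PrivateLinkage the mangler adds the target's private-label
  // prefix automatically, so the explicit "\01L_" escape is no longer needed.
  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      M, Ty, /*isConstant=*/false, llvm::GlobalValue::PrivateLinkage, Init,
      "OBJC_CLASS_NAME_");
)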
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index fc6bee3fabed..475254649866 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_OBCJRUNTIME_H
-#define CLANG_CODEGEN_OBCJRUNTIME_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGOBJCRUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGOBJCRUNTIME_H
#include "CGBuilder.h"
#include "CGCall.h"
#include "CGValue.h"
diff --git a/lib/CodeGen/CGOpenCLRuntime.h b/lib/CodeGen/CGOpenCLRuntime.h
index 7b675c3bc1e7..0c50b92914b8 100644
--- a/lib/CodeGen/CGOpenCLRuntime.h
+++ b/lib/CodeGen/CGOpenCLRuntime.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_OPENCLRUNTIME_H
-#define CLANG_CODEGEN_OPENCLRUNTIME_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENCLRUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGOPENCLRUNTIME_H
#include "clang/AST/Type.h"
#include "llvm/IR/Type.h"
diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp
index 12a3a7790eec..22ee00f2c7ae 100644
--- a/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -14,7 +14,9 @@
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/StmtOpenMP.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
@@ -24,16 +26,81 @@
using namespace clang;
using namespace CodeGen;
+namespace {
+/// \brief API for captured statement code generation in OpenMP constructs.
+class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
+public:
+ CGOpenMPRegionInfo(const OMPExecutableDirective &D, const CapturedStmt &CS,
+ const VarDecl *ThreadIDVar)
+ : CGCapturedStmtInfo(CS, CR_OpenMP), ThreadIDVar(ThreadIDVar),
+ Directive(D) {
+ assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
+ }
+
+ /// \brief Gets a variable or parameter for storing global thread id
+ /// inside OpenMP construct.
+ const VarDecl *getThreadIDVariable() const { return ThreadIDVar; }
+
+ /// \brief Gets an LValue for the current ThreadID variable.
+ LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
+
+ static bool classof(const CGCapturedStmtInfo *Info) {
+ return Info->getKind() == CR_OpenMP;
+ }
+
+ /// \brief Emit the captured statement body.
+ void EmitBody(CodeGenFunction &CGF, Stmt *S) override;
+
+ /// \brief Get the name of the capture helper.
+ StringRef getHelperName() const override { return ".omp_outlined."; }
+
+private:
+ /// \brief A variable or parameter storing global thread id for OpenMP
+ /// constructs.
+ const VarDecl *ThreadIDVar;
+ /// \brief OpenMP executable directive associated with the region.
+ const OMPExecutableDirective &Directive;
+};
+} // namespace
+
+LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
+ return CGF.MakeNaturalAlignAddrLValue(
+ CGF.GetAddrOfLocalVar(ThreadIDVar),
+ CGF.getContext().getPointerType(ThreadIDVar->getType()));
+}
+
+void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, Stmt *S) {
+ CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
+ CGF.EmitOMPPrivateClause(Directive, PrivateScope);
+ CGF.EmitOMPFirstprivateClause(Directive, PrivateScope);
+ if (PrivateScope.Privatize())
+ // Emit implicit barrier to synchronize threads and avoid data races.
+ CGF.CGM.getOpenMPRuntime().EmitOMPBarrierCall(CGF, Directive.getLocStart(),
+ /*IsExplicit=*/false);
+ CGCapturedStmtInfo::EmitBody(CGF, S);
+}
+
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
: CGM(CGM), DefaultOpenMPPSource(nullptr) {
IdentTy = llvm::StructType::create(
"ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */,
CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */,
- CGM.Int8PtrTy /* psource */, NULL);
+ CGM.Int8PtrTy /* psource */, nullptr);
// Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
llvm::PointerType::getUnqual(CGM.Int32Ty)};
Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
+ KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
+}
+
+llvm::Value *
+CGOpenMPRuntime::EmitOpenMPOutlinedFunction(const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar) {
+ const CapturedStmt *CS = cast<CapturedStmt>(D.getAssociatedStmt());
+ CodeGenFunction CGF(CGM, true);
+ CGOpenMPRegionInfo CGInfo(D, *CS, ThreadIDVar);
+ CGF.CapturedStmtInfo = &CGInfo;
+ return CGF.GenerateCapturedStmtFunction(*CS);
}
llvm::Value *
@@ -50,11 +117,10 @@ CGOpenMPRuntime::GetOrCreateDefaultOpenMPLocation(OpenMPLocationFlags Flags) {
DefaultOpenMPPSource =
llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
}
- llvm::GlobalVariable *DefaultOpenMPLocation = cast<llvm::GlobalVariable>(
- CGM.CreateRuntimeVariable(IdentTy, ".kmpc_default_loc.addr"));
+ auto DefaultOpenMPLocation = new llvm::GlobalVariable(
+ CGM.getModule(), IdentTy, /*isConstant*/ true,
+ llvm::GlobalValue::PrivateLinkage, /*Initializer*/ nullptr);
DefaultOpenMPLocation->setUnnamedAddr(true);
- DefaultOpenMPLocation->setConstant(true);
- DefaultOpenMPLocation->setLinkage(llvm::GlobalValue::PrivateLinkage);
llvm::Constant *Zero = llvm::ConstantInt::get(CGM.Int32Ty, 0, true);
llvm::Constant *Values[] = {Zero,
@@ -62,6 +128,7 @@ CGOpenMPRuntime::GetOrCreateDefaultOpenMPLocation(OpenMPLocationFlags Flags) {
Zero, Zero, DefaultOpenMPPSource};
llvm::Constant *Init = llvm::ConstantStruct::get(IdentTy, Values);
DefaultOpenMPLocation->setInitializer(Init);
+ OpenMPDefaultLocMap[Flags] = DefaultOpenMPLocation;
return DefaultOpenMPLocation;
}
return Entry;
@@ -77,14 +144,17 @@ llvm::Value *CGOpenMPRuntime::EmitOpenMPUpdateLocation(
assert(CGF.CurFn && "No function in current CodeGenFunction.");
llvm::Value *LocValue = nullptr;
- OpenMPLocMapTy::iterator I = OpenMPLocMap.find(CGF.CurFn);
- if (I != OpenMPLocMap.end()) {
- LocValue = I->second;
- } else {
+ auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
+ if (I != OpenMPLocThreadIDMap.end())
+ LocValue = I->second.DebugLoc;
+ // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
+ // GetOpenMPThreadID was called before this routine.
+ if (LocValue == nullptr) {
// Generate "ident_t .kmpc_loc.addr;"
llvm::AllocaInst *AI = CGF.CreateTempAlloca(IdentTy, ".kmpc_loc.addr");
AI->setAlignment(CGM.getDataLayout().getPrefTypeAlignment(IdentTy));
- OpenMPLocMap[CGF.CurFn] = AI;
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ Elem.second.DebugLoc = AI;
LocValue = AI;
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
@@ -95,7 +165,7 @@ llvm::Value *CGOpenMPRuntime::EmitOpenMPUpdateLocation(
}
// char **psource = &.kmpc_loc_<flags>.addr.psource;
- llvm::Value *PSource =
+ auto *PSource =
CGF.Builder.CreateConstInBoundsGEP2_32(LocValue, 0, IdentField_PSource);
auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
@@ -119,32 +189,54 @@ llvm::Value *CGOpenMPRuntime::EmitOpenMPUpdateLocation(
return LocValue;
}
-llvm::Value *CGOpenMPRuntime::GetOpenMPGlobalThreadNum(CodeGenFunction &CGF,
- SourceLocation Loc) {
+llvm::Value *CGOpenMPRuntime::GetOpenMPThreadID(CodeGenFunction &CGF,
+ SourceLocation Loc) {
assert(CGF.CurFn && "No function in current CodeGenFunction.");
- llvm::Value *GTid = nullptr;
- OpenMPGtidMapTy::iterator I = OpenMPGtidMap.find(CGF.CurFn);
- if (I != OpenMPGtidMap.end()) {
- GTid = I->second;
+ llvm::Value *ThreadID = nullptr;
+ // Check whether we've already cached a load of the thread id in this
+ // function.
+ auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
+ if (I != OpenMPLocThreadIDMap.end()) {
+ ThreadID = I->second.ThreadID;
+ if (ThreadID != nullptr)
+ return ThreadID;
+ }
+ if (auto OMPRegionInfo =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
+ // Check if this is an outlined function with the thread id passed as an argument.
+ auto ThreadIDVar = OMPRegionInfo->getThreadIDVariable();
+ auto LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
+ auto RVal = CGF.EmitLoadOfLValue(LVal, Loc);
+ LVal = CGF.MakeNaturalAlignAddrLValue(RVal.getScalarVal(),
+ ThreadIDVar->getType());
+ ThreadID = CGF.EmitLoadOfLValue(LVal, Loc).getScalarVal();
+ // If the value was loaded in the entry block, cache it and use it everywhere
+ // in the function.
+ if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ Elem.second.ThreadID = ThreadID;
+ }
} else {
- // Generate "int32 .kmpc_global_thread_num.addr;"
+ // This is not an outlined function region - we need to call kmp_int32
+ // __kmpc_global_thread_num(ident_t *loc).
+ // Generate the thread id value and cache it for use across the function.
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc)};
- GTid = CGF.EmitRuntimeCall(
+ ThreadID = CGF.EmitRuntimeCall(
CreateRuntimeFunction(OMPRTL__kmpc_global_thread_num), Args);
- OpenMPGtidMap[CGF.CurFn] = GTid;
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ Elem.second.ThreadID = ThreadID;
}
- return GTid;
+ return ThreadID;
}
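In C terms, GetOpenMPThreadID chooses between the two sources of the global thread id described above. A minimal sketch, with the runtime prototype taken from the "__kmpc_global_thread_num" comment and every other name purely illustrative:

  typedef int kmp_int32;
  typedef struct ident ident_t;
  extern "C" kmp_int32 __kmpc_global_thread_num(ident_t *loc);

  static kmp_int32 get_gtid(ident_t *loc,
                            kmp_int32 *gtid_arg /* non-null in outlined regions */) {
    if (gtid_arg)                           // outlined region: id arrives as "kmp_int32 *gtid"
      return *gtid_arg;
    return __kmpc_global_thread_num(loc);   // serial code: ask the runtime
  }

The codegen additionally caches the result per function in OpenMPLocThreadIDMap, so neither the argument load nor the runtime call is repeated.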
void CGOpenMPRuntime::FunctionFinished(CodeGenFunction &CGF) {
assert(CGF.CurFn && "No function in current CodeGenFunction.");
- if (OpenMPGtidMap.count(CGF.CurFn))
- OpenMPGtidMap.erase(CGF.CurFn);
- if (OpenMPLocMap.count(CGF.CurFn))
- OpenMPLocMap.erase(CGF.CurFn);
+ if (OpenMPLocThreadIDMap.count(CGF.CurFn))
+ OpenMPLocThreadIDMap.erase(CGF.CurFn);
}
llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
@@ -165,7 +257,7 @@ CGOpenMPRuntime::CreateRuntimeFunction(OpenMPRTLFunction Function) {
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
getKmpc_MicroPointerTy()};
llvm::FunctionType *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, true);
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
break;
}
@@ -173,10 +265,658 @@ CGOpenMPRuntime::CreateRuntimeFunction(OpenMPRTLFunction Function) {
// Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
llvm::FunctionType *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, false);
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
break;
}
+ case OMPRTL__kmpc_threadprivate_cached: {
+ // Build void *__kmpc_threadprivate_cached(ident_t *loc,
+ // kmp_int32 global_tid, void *data, size_t size, void ***cache);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ CGM.VoidPtrTy, CGM.SizeTy,
+ CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
+ break;
+ }
+ case OMPRTL__kmpc_critical: {
+ // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(KmpCriticalNameTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
+ break;
+ }
+ case OMPRTL__kmpc_threadprivate_register: {
+ // Build void __kmpc_threadprivate_register(ident_t *, void *data,
+ // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
+ // typedef void *(*kmpc_ctor)(void *);
+ auto KmpcCtorTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
+ /*isVarArg*/ false)->getPointerTo();
+ // typedef void *(*kmpc_cctor)(void *, void *);
+ llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
+ auto KmpcCopyCtorTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
+ /*isVarArg*/ false)->getPointerTo();
+ // typedef void (*kmpc_dtor)(void *);
+ auto KmpcDtorTy =
+ llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
+ ->getPointerTo();
+ llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
+ KmpcCopyCtorTy, KmpcDtorTy};
+ auto FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
+ /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
+ break;
+ }
+ case OMPRTL__kmpc_end_critical: {
+ // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(KmpCriticalNameTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
+ break;
+ }
+ case OMPRTL__kmpc_cancel_barrier: {
+ // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
+ break;
+ }
+ // Build __kmpc_for_static_init*(
+ // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
+ // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
+ // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
+ // kmp_int[32|64] incr, kmp_int[32|64] chunk);
+ case OMPRTL__kmpc_for_static_init_4: {
+ auto ITy = CGM.Int32Ty;
+ auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ CGM.Int32Ty, // schedtype
+ llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
+ PtrTy, // p_lower
+ PtrTy, // p_upper
+ PtrTy, // p_stride
+ ITy, // incr
+ ITy // chunk
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_4");
+ break;
+ }
+ case OMPRTL__kmpc_for_static_init_4u: {
+ auto ITy = CGM.Int32Ty;
+ auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ CGM.Int32Ty, // schedtype
+ llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
+ PtrTy, // p_lower
+ PtrTy, // p_upper
+ PtrTy, // p_stride
+ ITy, // incr
+ ITy // chunk
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_4u");
+ break;
+ }
+ case OMPRTL__kmpc_for_static_init_8: {
+ auto ITy = CGM.Int64Ty;
+ auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ CGM.Int32Ty, // schedtype
+ llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
+ PtrTy, // p_lower
+ PtrTy, // p_upper
+ PtrTy, // p_stride
+ ITy, // incr
+ ITy // chunk
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_8");
+ break;
+ }
+ case OMPRTL__kmpc_for_static_init_8u: {
+ auto ITy = CGM.Int64Ty;
+ auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ CGM.Int32Ty, // schedtype
+ llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
+ PtrTy, // p_lower
+ PtrTy, // p_upper
+ PtrTy, // p_stride
+ ITy, // incr
+ ITy // chunk
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_8u");
+ break;
+ }
+ case OMPRTL__kmpc_for_static_fini: {
+ // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
+ break;
+ }
+ case OMPRTL__kmpc_push_num_threads: {
+ // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
+ // kmp_int32 num_threads)
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
+ break;
+ }
+ case OMPRTL__kmpc_serialized_parallel: {
+ // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
+ break;
+ }
+ case OMPRTL__kmpc_end_serialized_parallel: {
+ // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
+ break;
+ }
+ case OMPRTL__kmpc_flush: {
+ // Build void __kmpc_flush(ident_t *loc, ...);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
+ break;
+ }
+ case OMPRTL__kmpc_master: {
+ // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
+ break;
+ }
+ case OMPRTL__kmpc_end_master: {
+ // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
+ break;
+ }
}
return RTLFn;
}
+
+llvm::Constant *
+CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
+ // Lookup the entry, lazily creating it if necessary.
+ return GetOrCreateInternalVariable(CGM.Int8PtrPtrTy,
+ Twine(CGM.getMangledName(VD)) + ".cache.");
+}
+
+llvm::Value *CGOpenMPRuntime::getOMPAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ llvm::Value *VDAddr,
+ SourceLocation Loc) {
+ auto VarTy = VDAddr->getType()->getPointerElementType();
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc),
+ GetOpenMPThreadID(CGF, Loc),
+ CGF.Builder.CreatePointerCast(VDAddr, CGM.Int8PtrTy),
+ CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
+ getOrCreateThreadPrivateCache(VD)};
+ return CGF.EmitRuntimeCall(
+ CreateRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args);
+}
+
+void CGOpenMPRuntime::EmitOMPThreadPrivateVarInit(
+ CodeGenFunction &CGF, llvm::Value *VDAddr, llvm::Value *Ctor,
+ llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
+ // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
+ // library.
+ auto OMPLoc = EmitOpenMPUpdateLocation(CGF, Loc);
+ CGF.EmitRuntimeCall(CreateRuntimeFunction(OMPRTL__kmpc_global_thread_num),
+ OMPLoc);
+ // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
+ // to register constructor/destructor for variable.
+ llvm::Value *Args[] = {OMPLoc,
+ CGF.Builder.CreatePointerCast(VDAddr, CGM.VoidPtrTy),
+ Ctor, CopyCtor, Dtor};
+ CGF.EmitRuntimeCall(
+ CreateRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
+}
+
+llvm::Function *CGOpenMPRuntime::EmitOMPThreadPrivateVarDefinition(
+ const VarDecl *VD, llvm::Value *VDAddr, SourceLocation Loc,
+ bool PerformInit, CodeGenFunction *CGF) {
+ VD = VD->getDefinition(CGM.getContext());
+ if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
+ ThreadPrivateWithDefinition.insert(VD);
+ QualType ASTTy = VD->getType();
+
+ llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
+ auto Init = VD->getAnyInitializer();
+ if (CGM.getLangOpts().CPlusPlus && PerformInit) {
+ // Generate function that re-emits the declaration's initializer into the
+ // threadprivate copy of the variable VD
+ CodeGenFunction CtorCGF(CGM);
+ FunctionArgList Args;
+ ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, SourceLocation(),
+ /*Id=*/nullptr, CGM.getContext().VoidPtrTy);
+ Args.push_back(&Dst);
+
+ auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ CGM.getContext().VoidPtrTy, Args, FunctionType::ExtInfo(),
+ /*isVariadic=*/false);
+ auto FTy = CGM.getTypes().GetFunctionType(FI);
+ auto Fn = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, ".__kmpc_global_ctor_.", Loc);
+ CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
+ Args, SourceLocation());
+ auto ArgVal = CtorCGF.EmitLoadOfScalar(
+ CtorCGF.GetAddrOfLocalVar(&Dst),
+ /*Volatile=*/false, CGM.PointerAlignInBytes,
+ CGM.getContext().VoidPtrTy, Dst.getLocation());
+ auto Arg = CtorCGF.Builder.CreatePointerCast(
+ ArgVal,
+ CtorCGF.ConvertTypeForMem(CGM.getContext().getPointerType(ASTTy)));
+ CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
+ /*IsInitializer=*/true);
+ ArgVal = CtorCGF.EmitLoadOfScalar(
+ CtorCGF.GetAddrOfLocalVar(&Dst),
+ /*Volatile=*/false, CGM.PointerAlignInBytes,
+ CGM.getContext().VoidPtrTy, Dst.getLocation());
+ CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
+ CtorCGF.FinishFunction();
+ Ctor = Fn;
+ }
+ if (VD->getType().isDestructedType() != QualType::DK_none) {
+ // Generate function that emits destructor call for the threadprivate copy
+ // of the variable VD
+ CodeGenFunction DtorCGF(CGM);
+ FunctionArgList Args;
+ ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, SourceLocation(),
+ /*Id=*/nullptr, CGM.getContext().VoidPtrTy);
+ Args.push_back(&Dst);
+
+ auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ CGM.getContext().VoidTy, Args, FunctionType::ExtInfo(),
+ /*isVariadic=*/false);
+ auto FTy = CGM.getTypes().GetFunctionType(FI);
+ auto Fn = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, ".__kmpc_global_dtor_.", Loc);
+ DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
+ SourceLocation());
+ auto ArgVal = DtorCGF.EmitLoadOfScalar(
+ DtorCGF.GetAddrOfLocalVar(&Dst),
+ /*Volatile=*/false, CGM.PointerAlignInBytes,
+ CGM.getContext().VoidPtrTy, Dst.getLocation());
+ DtorCGF.emitDestroy(ArgVal, ASTTy,
+ DtorCGF.getDestroyer(ASTTy.isDestructedType()),
+ DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
+ DtorCGF.FinishFunction();
+ Dtor = Fn;
+ }
+ // Do not emit init function if it is not required.
+ if (!Ctor && !Dtor)
+ return nullptr;
+
+ llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
+ auto CopyCtorTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
+ /*isVarArg=*/false)->getPointerTo();
+ // Copying constructor for the threadprivate variable.
+ // Must be NULL - reserved by the runtime, which currently requires this
+ // parameter to always be NULL. Otherwise it fires an assertion.
+ CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
+ if (Ctor == nullptr) {
+ auto CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
+ /*isVarArg=*/false)->getPointerTo();
+ Ctor = llvm::Constant::getNullValue(CtorTy);
+ }
+ if (Dtor == nullptr) {
+ auto DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
+ /*isVarArg=*/false)->getPointerTo();
+ Dtor = llvm::Constant::getNullValue(DtorTy);
+ }
+ if (!CGF) {
+ auto InitFunctionTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
+ auto InitFunction = CGM.CreateGlobalInitOrDestructFunction(
+ InitFunctionTy, ".__omp_threadprivate_init_.");
+ CodeGenFunction InitCGF(CGM);
+ FunctionArgList ArgList;
+ InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
+ CGM.getTypes().arrangeNullaryFunction(), ArgList,
+ Loc);
+ EmitOMPThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
+ InitCGF.FinishFunction();
+ return InitFunction;
+ }
+ EmitOMPThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
+ }
+ return nullptr;
+}
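Putting the two threadprivate pieces together: the definition registers the variable (and its constructor/destructor) once, and each later use goes through __kmpc_threadprivate_cached to fetch the calling thread's copy; per the early-return above, the registration is only emitted when a constructor or destructor is actually required. A condensed sketch using the prototypes quoted in the comments above and reusing the kmp_int32/ident_t declarations from the earlier sketch; gvar, gvar_cache and the helper names are hypothetical:

  #include <cstddef>
  extern "C" void __kmpc_threadprivate_register(ident_t *loc, void *data,
                                                void *(*ctor)(void *),
                                                void *(*cctor)(void *, void *),
                                                void (*dtor)(void *));
  extern "C" void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 gtid,
                                               void *data, std::size_t size,
                                               void ***cache);

  static int gvar;            // the threadprivate variable
  static void **gvar_cache;   // the "<mangled name>.cache." slot created by the codegen

  void register_gvar(ident_t *loc) {                 // emitted into an init function
    __kmpc_global_thread_num(loc);                   // initializes the OpenMP runtime
    __kmpc_threadprivate_register(loc, &gvar, nullptr, nullptr, nullptr);
  }

  int *gvar_addr(ident_t *loc, kmp_int32 gtid) {     // every use of gvar
    return static_cast<int *>(__kmpc_threadprivate_cached(
        loc, gtid, &gvar, sizeof(gvar), &gvar_cache));
  }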
+
+void CGOpenMPRuntime::EmitOMPParallelCall(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ llvm::Value *CapturedStruct) {
+ // Build call __kmpc_fork_call(loc, 1, microtask, captured_struct/*context*/)
+ llvm::Value *Args[] = {
+ EmitOpenMPUpdateLocation(CGF, Loc),
+ CGF.Builder.getInt32(1), // Number of arguments after 'microtask' argument
+ // (there is only one additional argument - 'context')
+ CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy()),
+ CGF.EmitCastToVoidPtr(CapturedStruct)};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_fork_call);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
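EmitOMPParallelCall therefore reduces a parallel region to one runtime call. A rough source-level picture, using the kmpc_micro and __kmpc_fork_call shapes from the comments above; captured_t and omp_outlined are hypothetical stand-ins for the struct and function that CodeGen actually builds:

  typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);
  extern "C" void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
                                   kmpc_micro microtask, ...);

  struct captured_t { /* addresses of the captured variables */ };

  static void omp_outlined(kmp_int32 *gtid, kmp_int32 *btid, captured_t *ctx) {
    // region body, reading captures through ctx
  }

  void emit_parallel(ident_t *loc, captured_t *ctx) {
    // argc == 1: only the captured struct follows the microtask argument.
    __kmpc_fork_call(loc, 1, reinterpret_cast<kmpc_micro>(omp_outlined), ctx);
  }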
+
+void CGOpenMPRuntime::EmitOMPSerialCall(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ llvm::Value *CapturedStruct) {
+ auto ThreadID = GetOpenMPThreadID(CGF, Loc);
+ // Build calls:
+ // __kmpc_serialized_parallel(&Loc, GTid);
+ llvm::Value *SerArgs[] = {EmitOpenMPUpdateLocation(CGF, Loc), ThreadID};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_serialized_parallel);
+ CGF.EmitRuntimeCall(RTLFn, SerArgs);
+
+ // OutlinedFn(&GTid, &zero, CapturedStruct);
+ auto ThreadIDAddr = EmitThreadIDAddress(CGF, Loc);
+ auto Int32Ty =
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
+ auto ZeroAddr = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ llvm::Value *OutlinedFnArgs[] = {ThreadIDAddr, ZeroAddr, CapturedStruct};
+ CGF.EmitCallOrInvoke(OutlinedFn, OutlinedFnArgs);
+
+ // __kmpc_end_serialized_parallel(&Loc, GTid);
+ llvm::Value *EndSerArgs[] = {EmitOpenMPUpdateLocation(CGF, Loc), ThreadID};
+ RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel);
+ CGF.EmitRuntimeCall(RTLFn, EndSerArgs);
+}
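If the region has to run serialized, the same outlined function is simply called on the current thread between the two bracketing runtime calls; schematically, reusing the names from the previous sketches:

  extern "C" void __kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid);
  extern "C" void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid);

  void emit_serial(ident_t *loc, kmp_int32 gtid, captured_t *ctx) {
    __kmpc_serialized_parallel(loc, gtid);
    kmp_int32 zero = 0;
    omp_outlined(&gtid, &zero, ctx);          // OutlinedFn(&GTid, &zero, CapturedStruct)
    __kmpc_end_serialized_parallel(loc, gtid);
  }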
+
+// If we're inside an (outlined) parallel region, use the region info's
+// thread-ID variable (it is passed as the first argument of the outlined
+// function, as "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel
+// region but in regular serial code, get the thread ID by calling kmp_int32
+// __kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary,
+// and return the address of that temporary.
+llvm::Value *CGOpenMPRuntime::EmitThreadIDAddress(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ if (auto OMPRegionInfo =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
+ return CGF.EmitLoadOfLValue(OMPRegionInfo->getThreadIDVariableLValue(CGF),
+ SourceLocation()).getScalarVal();
+ auto ThreadID = GetOpenMPThreadID(CGF, Loc);
+ auto Int32Ty =
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
+ auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
+ CGF.EmitStoreOfScalar(ThreadID,
+ CGF.MakeNaturalAlignAddrLValue(ThreadIDTemp, Int32Ty));
+
+ return ThreadIDTemp;
+}
+
+llvm::Constant *
+CGOpenMPRuntime::GetOrCreateInternalVariable(llvm::Type *Ty,
+ const llvm::Twine &Name) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ Out << Name;
+ auto RuntimeName = Out.str();
+ auto &Elem = *InternalVars.insert(std::make_pair(RuntimeName, nullptr)).first;
+ if (Elem.second) {
+ assert(Elem.second->getType()->getPointerElementType() == Ty &&
+ "OMP internal variable has different type than requested");
+ return &*Elem.second;
+ }
+
+ return Elem.second = new llvm::GlobalVariable(
+ CGM.getModule(), Ty, /*IsConstant*/ false,
+ llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
+ Elem.first());
+}
+
+llvm::Value *CGOpenMPRuntime::GetCriticalRegionLock(StringRef CriticalName) {
+ llvm::Twine Name(".gomp_critical_user_", CriticalName);
+ return GetOrCreateInternalVariable(KmpCriticalNameTy, Name.concat(".var"));
+}
+
+void CGOpenMPRuntime::EmitOMPCriticalRegion(
+ CodeGenFunction &CGF, StringRef CriticalName,
+ const std::function<void()> &CriticalOpGen, SourceLocation Loc) {
+ auto RegionLock = GetCriticalRegionLock(CriticalName);
+ // __kmpc_critical(ident_t *, gtid, Lock);
+ // CriticalOpGen();
+ // __kmpc_end_critical(ident_t *, gtid, Lock);
+ // Prepare arguments and build a call to __kmpc_critical
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc),
+ GetOpenMPThreadID(CGF, Loc), RegionLock};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_critical);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+ CriticalOpGen();
+ // Build a call to __kmpc_end_critical
+ RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_end_critical);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
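An "omp critical" region thus becomes a pair of calls around the user code, keyed by a per-name lock: the ".gomp_critical_user_<name>.var" global created by GetCriticalRegionLock, with kmp_critical_name being kmp_int32[8] per the header comment. Schematically, with illustrative names:

  extern "C" void __kmpc_critical(ident_t *loc, kmp_int32 gtid, kmp_int32 (*crit)[8]);
  extern "C" void __kmpc_end_critical(ident_t *loc, kmp_int32 gtid, kmp_int32 (*crit)[8]);

  static kmp_int32 gomp_critical_user_name_var[8];   // zero-initialized lock

  void emit_critical(ident_t *loc, kmp_int32 gtid) {
    __kmpc_critical(loc, gtid, &gomp_critical_user_name_var);
    // ... critical section body ...
    __kmpc_end_critical(loc, gtid, &gomp_critical_user_name_var);
  }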
+
+static void EmitOMPIfStmt(CodeGenFunction &CGF, llvm::Value *IfCond,
+ const std::function<void()> &BodyOpGen) {
+ llvm::Value *CallBool = CGF.EmitScalarConversion(
+ IfCond,
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true),
+ CGF.getContext().BoolTy);
+
+ auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
+ auto *ContBlock = CGF.createBasicBlock("omp_if.end");
+ // Generate the branch (If-stmt)
+ CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
+ CGF.EmitBlock(ThenBlock);
+ BodyOpGen();
+ // Emit the rest of bblocks/branches
+ CGF.EmitBranch(ContBlock);
+ CGF.EmitBlock(ContBlock, true);
+}
+
+void CGOpenMPRuntime::EmitOMPMasterRegion(
+ CodeGenFunction &CGF, const std::function<void()> &MasterOpGen,
+ SourceLocation Loc) {
+ // if(__kmpc_master(ident_t *, gtid)) {
+ // MasterOpGen();
+ // __kmpc_end_master(ident_t *, gtid);
+ // }
+ // Prepare arguments and build a call to __kmpc_master
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc),
+ GetOpenMPThreadID(CGF, Loc)};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_master);
+ auto *IsMaster = CGF.EmitRuntimeCall(RTLFn, Args);
+ EmitOMPIfStmt(CGF, IsMaster, [&]() -> void {
+ MasterOpGen();
+ // Build a call to __kmpc_end_master.
+ // OpenMP [1.2.2 OpenMP Language Terminology]
+ // For C/C++, an executable statement, possibly compound, with a single
+ // entry at the top and a single exit at the bottom, or an OpenMP construct.
+ // * Access to the structured block must not be the result of a branch.
+ // * The point of exit cannot be a branch out of the structured block.
+ // * The point of entry must not be a call to setjmp().
+ // * longjmp() and throw() must not violate the entry/exit criteria.
+ // * An expression statement, iteration statement, selection statement, or
+ // try block is considered to be a structured block if the corresponding
+ // compound statement obtained by enclosing it in { and } would be a
+ // structured block.
+ // It is analyzed in Sema, so we can just call __kmpc_end_master() on
+ // fallthrough rather than pushing a normal cleanup for it.
+ RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_end_master);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+ });
+}
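The master construct is guarded rather than bracketed: only the thread for which __kmpc_master() returns non-zero runs the body, and only that thread calls __kmpc_end_master() on the fall-through path. Schematically, reusing the earlier declarations:

  extern "C" kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 gtid);
  extern "C" void __kmpc_end_master(ident_t *loc, kmp_int32 gtid);

  void emit_master(ident_t *loc, kmp_int32 gtid) {
    if (__kmpc_master(loc, gtid)) {
      // ... master region body (a structured block, so no branches out) ...
      __kmpc_end_master(loc, gtid);
    }
  }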
+
+void CGOpenMPRuntime::EmitOMPBarrierCall(CodeGenFunction &CGF,
+ SourceLocation Loc, bool IsExplicit) {
+ // Build call __kmpc_cancel_barrier(loc, thread_id);
+ auto Flags = static_cast<OpenMPLocationFlags>(
+ OMP_IDENT_KMPC |
+ (IsExplicit ? OMP_IDENT_BARRIER_EXPL : OMP_IDENT_BARRIER_IMPL));
+ // __kmpc_cancel_barrier() is used instead of __kmpc_barrier() because it
+ // provides the same functionality and adds initial support for the
+ // cancellation constructs introduced in OpenMP 4.0. __kmpc_cancel_barrier()
+ // is provided by default by the runtime library, so it is safe to make this
+ // replacement.
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc, Flags),
+ GetOpenMPThreadID(CGF, Loc)};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_cancel_barrier);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
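A barrier, explicit or implicit, is a single call; the two cases differ only in the OMP_IDENT_BARRIER_EXPL / OMP_IDENT_BARRIER_IMPL flag baked into the ident_t, and the kmp_int32 result (reserved for cancellation support) is currently unused. A minimal sketch:

  extern "C" kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32 gtid);

  void emit_barrier(ident_t *loc_with_flags, kmp_int32 gtid) {
    (void)__kmpc_cancel_barrier(loc_with_flags, gtid);   // result ignored for now
  }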
+
+/// \brief Schedule types for 'omp for' loops (these enumerators are taken from
+/// the enum sched_type in kmp.h).
+enum OpenMPSchedType {
+ /// \brief Lower bound for default (unordered) versions.
+ OMP_sch_lower = 32,
+ OMP_sch_static_chunked = 33,
+ OMP_sch_static = 34,
+ OMP_sch_dynamic_chunked = 35,
+ OMP_sch_guided_chunked = 36,
+ OMP_sch_runtime = 37,
+ OMP_sch_auto = 38,
+ /// \brief Lower bound for 'ordered' versions.
+ OMP_ord_lower = 64,
+ /// \brief Lower bound for 'nomerge' versions.
+ OMP_nm_lower = 160,
+};
+
+/// \brief Map the OpenMP loop schedule to the runtime enumeration.
+static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked) {
+ switch (ScheduleKind) {
+ case OMPC_SCHEDULE_static:
+ return Chunked ? OMP_sch_static_chunked : OMP_sch_static;
+ case OMPC_SCHEDULE_dynamic:
+ return OMP_sch_dynamic_chunked;
+ case OMPC_SCHEDULE_guided:
+ return OMP_sch_guided_chunked;
+ case OMPC_SCHEDULE_auto:
+ return OMP_sch_auto;
+ case OMPC_SCHEDULE_runtime:
+ return OMP_sch_runtime;
+ case OMPC_SCHEDULE_unknown:
+ assert(!Chunked && "chunk was specified but schedule kind not known");
+ return OMP_sch_static;
+ }
+ llvm_unreachable("Unexpected runtime schedule");
+}
+
+bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked) const {
+ auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
+ return Schedule == OMP_sch_static;
+}
+
+void CGOpenMPRuntime::EmitOMPForInit(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind ScheduleKind,
+ unsigned IVSize, bool IVSigned,
+ llvm::Value *IL, llvm::Value *LB,
+ llvm::Value *UB, llvm::Value *ST,
+ llvm::Value *Chunk) {
+ OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunk != nullptr);
+ // Call __kmpc_for_static_init(
+ // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
+ // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
+ // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
+ // kmp_int[32|64] incr, kmp_int[32|64] chunk);
+ // TODO: Implement dynamic schedule.
+
+ // If the Chunk was not specified in the clause - use default value 1.
+ if (Chunk == nullptr)
+ Chunk = CGF.Builder.getIntN(IVSize, /*C*/ 1);
+
+ llvm::Value *Args[] = {
+ EmitOpenMPUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ GetOpenMPThreadID(CGF, Loc),
+ CGF.Builder.getInt32(Schedule), // Schedule type
+ IL, // &isLastIter
+ LB, // &LB
+ UB, // &UB
+ ST, // &Stride
+ CGF.Builder.getIntN(IVSize, 1), // Incr
+ Chunk // Chunk
+ };
+ assert((IVSize == 32 || IVSize == 64) &&
+ "Index size is not compatible with the omp runtime");
+ auto F = IVSize == 32 ? (IVSigned ? OMPRTL__kmpc_for_static_init_4
+ : OMPRTL__kmpc_for_static_init_4u)
+ : (IVSigned ? OMPRTL__kmpc_for_static_init_8
+ : OMPRTL__kmpc_for_static_init_8u);
+ auto RTLFn = CreateRuntimeFunction(F);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
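For a statically scheduled worksharing loop the runtime only computes this thread's bounds. A simplified picture for a 32-bit signed induction variable with schedule(static); details such as clamping the returned upper bound, which the real lowering performs, are omitted, and 34 is OMP_sch_static from the enum above:

  extern "C" void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                           kmp_int32 schedtype, kmp_int32 *p_lastiter,
                                           kmp_int32 *p_lower, kmp_int32 *p_upper,
                                           kmp_int32 *p_stride, kmp_int32 incr,
                                           kmp_int32 chunk);
  extern "C" void __kmpc_for_static_fini(ident_t *loc, kmp_int32 gtid);

  void emit_static_for(ident_t *loc, kmp_int32 gtid, kmp_int32 N) {
    kmp_int32 last = 0, lb = 0, ub = N - 1, stride = 1;
    __kmpc_for_static_init_4(loc, gtid, /*OMP_sch_static*/ 34, &last, &lb, &ub,
                             &stride, /*incr*/ 1, /*chunk*/ 1);
    for (kmp_int32 i = lb; i <= ub; ++i) {
      // loop body
    }
    __kmpc_for_static_fini(loc, gtid);
  }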
+
+void CGOpenMPRuntime::EmitOMPForFinish(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind ScheduleKind) {
+ assert((ScheduleKind == OMPC_SCHEDULE_static ||
+ ScheduleKind == OMPC_SCHEDULE_unknown) &&
+ "Non-static schedule kinds are not yet implemented");
+ // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ GetOpenMPThreadID(CGF, Loc)};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_for_static_fini);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
+
+void CGOpenMPRuntime::EmitOMPNumThreadsClause(CodeGenFunction &CGF,
+ llvm::Value *NumThreads,
+ SourceLocation Loc) {
+ // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
+ llvm::Value *Args[] = {
+ EmitOpenMPUpdateLocation(CGF, Loc), GetOpenMPThreadID(CGF, Loc),
+ CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
+ llvm::Constant *RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_push_num_threads);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
+
+void CGOpenMPRuntime::EmitOMPFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
+ SourceLocation Loc) {
+ // Build call void __kmpc_flush(ident_t *loc, ...)
+ // FIXME: The list of variables is ignored by the libiomp5 runtime; there is
+ // no need to generate it, just request a full memory fence.
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc),
+ llvm::ConstantInt::get(CGM.Int32Ty, 0)};
+ auto *RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_flush);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
diff --git a/lib/CodeGen/CGOpenMPRuntime.h b/lib/CodeGen/CGOpenMPRuntime.h
index 862e8a148c56..6daf8179c14e 100644
--- a/lib/CodeGen/CGOpenMPRuntime.h
+++ b/lib/CodeGen/CGOpenMPRuntime.h
@@ -11,29 +11,31 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_OPENMPRUNTIME_H
-#define CLANG_CODEGEN_OPENMPRUNTIME_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
-#include "clang/AST/Type.h"
+#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/Value.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/ValueHandle.h"
namespace llvm {
-class AllocaInst;
-class CallInst;
-class GlobalVariable;
+class ArrayType;
class Constant;
class Function;
-class Module;
-class StructLayout;
class FunctionType;
+class GlobalVariable;
class StructType;
class Type;
class Value;
} // namespace llvm
namespace clang {
+class Expr;
+class OMPExecutableDirective;
+class VarDecl;
namespace CodeGen {
@@ -42,6 +44,52 @@ class CodeGenModule;
class CGOpenMPRuntime {
public:
+
+private:
+ enum OpenMPRTLFunction {
+ /// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
+ /// kmpc_micro microtask, ...);
+ OMPRTL__kmpc_fork_call,
+ /// \brief Call to void *__kmpc_threadprivate_cached(ident_t *loc,
+ /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
+ OMPRTL__kmpc_threadprivate_cached,
+ /// \brief Call to void __kmpc_threadprivate_register( ident_t *,
+ /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
+ OMPRTL__kmpc_threadprivate_register,
+ // Call to kmp_int32 __kmpc_global_thread_num(ident_t *loc);
+ OMPRTL__kmpc_global_thread_num,
+ // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ OMPRTL__kmpc_critical,
+ // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ OMPRTL__kmpc_end_critical,
+ // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
+ // global_tid);
+ OMPRTL__kmpc_cancel_barrier,
+ // Calls for static scheduling 'omp for' loops.
+ OMPRTL__kmpc_for_static_init_4,
+ OMPRTL__kmpc_for_static_init_4u,
+ OMPRTL__kmpc_for_static_init_8,
+ OMPRTL__kmpc_for_static_init_8u,
+ OMPRTL__kmpc_for_static_fini,
+ // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ OMPRTL__kmpc_serialized_parallel,
+ // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ OMPRTL__kmpc_end_serialized_parallel,
+ // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
+ // kmp_int32 num_threads);
+ OMPRTL__kmpc_push_num_threads,
+ // Call to void __kmpc_flush(ident_t *loc, ...);
+ OMPRTL__kmpc_flush,
+ // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
+ OMPRTL__kmpc_master,
+ // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
+ OMPRTL__kmpc_end_master,
+ };
+
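
A minimal sketch (not the real libiomp5 headers) of the C-level shape of a few entry points listed above, transcribed from the signature comments; kmp_int32 is assumed to be a 32-bit integer and ident_t is treated here as an opaque source-location descriptor.

#include <cstdint>

extern "C" {
typedef int32_t kmp_int32;               // assumed width
typedef kmp_int32 kmp_critical_name[8];  // per the KmpCriticalNameTy comment below
struct ident_t;                          // opaque here

kmp_int32 __kmpc_global_thread_num(ident_t *loc);
kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32 global_tid);
void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
                             kmp_int32 num_threads);
void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
                     kmp_critical_name *crit);
void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
                         kmp_critical_name *crit);
void __kmpc_flush(ident_t *loc, ...);
}
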
/// \brief Values for bit flags used in the ident_t to describe the fields.
/// All enumeric elements are named and described in accordance with the code
/// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
@@ -63,20 +111,11 @@ public:
/// \brief Implicit barrier in 'single' directive.
OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140
};
- enum OpenMPRTLFunction {
- // Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
- // microtask, ...);
- OMPRTL__kmpc_fork_call,
- // Call to kmp_int32 kmpc_global_thread_num(ident_t *loc);
- OMPRTL__kmpc_global_thread_num
- };
-
-private:
CodeGenModule &CGM;
/// \brief Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource;
- /// \brief Map of flags and corrsponding default locations.
+ /// \brief Map of flags and corresponding default locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDefaultLocMapTy;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
llvm::Value *GetOrCreateDefaultOpenMPLocation(OpenMPLocationFlags Flags);
@@ -121,55 +160,241 @@ private:
IdentField_PSource
};
llvm::StructType *IdentTy;
- /// \brief Map for Sourcelocation and OpenMP runtime library debug locations.
+ /// \brief Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// \brief The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy;
- /// \brief Map of local debug location and functions.
- typedef llvm::DenseMap<llvm::Function *, llvm::Value *> OpenMPLocMapTy;
- OpenMPLocMapTy OpenMPLocMap;
- /// \brief Map of local gtid and functions.
- typedef llvm::DenseMap<llvm::Function *, llvm::Value *> OpenMPGtidMapTy;
- OpenMPGtidMapTy OpenMPGtidMap;
+ /// \brief Stores debug location and ThreadID for the function.
+ struct DebugLocThreadIdTy {
+ llvm::Value *DebugLoc;
+ llvm::Value *ThreadID;
+ };
+ /// \brief Map of local debug location, ThreadId and functions.
+ typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
+ OpenMPLocThreadIDMapTy;
+ OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
+ /// \brief Type kmp_critical_name, originally defined as typedef kmp_int32
+ /// kmp_critical_name[8];
+ llvm::ArrayType *KmpCriticalNameTy;
+ /// \brief An ordered map of unique names to the corresponding auto-generated variables.
+ /// It stores variables with the following names: 1) ".gomp_critical_user_" +
+ /// <critical_section_name> + ".var" for "omp critical" directives; 2)
+ /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
+ /// variables.
+ llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
+ InternalVars;
+
+ /// \brief Emits object of ident_t type with info for source location.
+ /// \param Flags Flags for OpenMP location.
+ ///
+ llvm::Value *
+ EmitOpenMPUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPLocationFlags Flags = OMP_IDENT_KMPC);
+
+ /// \brief Returns pointer to ident_t type.
+ llvm::Type *getIdentTyPointerTy();
+
+ /// \brief Returns pointer to kmpc_micro type.
+ llvm::Type *getKmpc_MicroPointerTy();
+
+ /// \brief Returns specified OpenMP runtime function.
+ /// \param Function OpenMP runtime function.
+ /// \return Specified function.
+ llvm::Constant *CreateRuntimeFunction(OpenMPRTLFunction Function);
+
+ /// \brief If the specified mangled name is not in the module, create and
+ /// return threadprivate cache object. This object is a pointer's worth of
+ /// storage that's reserved for use by the OpenMP runtime.
+ /// \param VD Threadprivate variable.
+ /// \return Cache variable for the specified threadprivate.
+ llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
+
+ /// \brief Emits address of the word in a memory where current thread id is
+ /// stored.
+ virtual llvm::Value *EmitThreadIDAddress(CodeGenFunction &CGF,
+ SourceLocation Loc);
+
+ /// \brief Gets thread id value for the current thread.
+ ///
+ llvm::Value *GetOpenMPThreadID(CodeGenFunction &CGF, SourceLocation Loc);
+
+ /// \brief Gets (if a variable with the given name already exists) or creates
+ /// an internal global variable with the specified Name. The created variable
+ /// has CommonLinkage by default and is initialized with a null value.
+ /// \param Ty Type of the global variable. If it already exists, the type
+ /// must be the same.
+ /// \param Name Name of the variable.
+ llvm::Constant *GetOrCreateInternalVariable(llvm::Type *Ty,
+ const llvm::Twine &Name);
+
+ /// \brief Set of threadprivate variables with the generated initializer.
+ llvm::DenseSet<const VarDecl *> ThreadPrivateWithDefinition;
+
+ /// \brief Emits initialization code for the threadprivate variables.
+ /// \param VDAddr Address of the global variable \a VD.
+ /// \param Ctor Pointer to a global init function for \a VD.
+ /// \param CopyCtor Pointer to a global copy function for \a VD.
+ /// \param Dtor Pointer to a global destructor function for \a VD.
+ /// \param Loc Location of threadprivate declaration.
+ void EmitOMPThreadPrivateVarInit(CodeGenFunction &CGF, llvm::Value *VDAddr,
+ llvm::Value *Ctor, llvm::Value *CopyCtor,
+ llvm::Value *Dtor, SourceLocation Loc);
+
+ /// \brief Returns corresponding lock object for the specified critical region
+ /// name. If the lock object does not exist it is created, otherwise the
+ /// reference to the existing copy is returned.
+ /// \param CriticalName Name of the critical region.
+ ///
+ llvm::Value *GetCriticalRegionLock(StringRef CriticalName);
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM);
- ~CGOpenMPRuntime() {}
+ virtual ~CGOpenMPRuntime() {}
+
+ /// \brief Emits outlined function for the specified OpenMP directive \a D
+ /// (required for parallel and task directives). This outlined function has
+ /// type void(*)(kmp_int32 /*ThreadID*/, kmp_int32 /*BoundID*/, struct
+ /// context_vars*).
+ /// \param D OpenMP directive.
+ /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
+ ///
+ virtual llvm::Value *
+ EmitOpenMPOutlinedFunction(const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar);
/// \brief Cleans up references to the objects in finished function.
- /// \param CGF Reference to finished CodeGenFunction.
///
void FunctionFinished(CodeGenFunction &CGF);
- /// \brief Emits object of ident_t type with info for source location.
+ /// \brief Emits code for parallel call of the \a OutlinedFn with variables
+ /// captured in a record whose address is stored in \a CapturedStruct.
+ /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
+ /// this function is void(*)(kmp_int32, kmp_int32, struct context_vars*).
+ /// \param CapturedStruct A pointer to the record with the references to
+ /// variables used in \a OutlinedFn function.
+ ///
+ virtual void EmitOMPParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ llvm::Value *CapturedStruct);
+
+ /// \brief Emits code for serial call of the \a OutlinedFn with variables
+ /// captured in a record whose address is stored in \a CapturedStruct.
+ /// \param OutlinedFn Outlined function to be run in serial mode.
+ /// \param CapturedStruct A pointer to the record with the references to
+ /// variables used in \a OutlinedFn function.
+ ///
+ virtual void EmitOMPSerialCall(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ llvm::Value *CapturedStruct);
+
+ /// \brief Emits a critical region.
+ /// \param CriticalName Name of the critical region.
+ /// \param CriticalOpGen Generator for the statement associated with the given
+ /// critical region.
+ virtual void EmitOMPCriticalRegion(CodeGenFunction &CGF,
+ StringRef CriticalName,
+ const std::function<void()> &CriticalOpGen,
+ SourceLocation Loc);
+
+ /// \brief Emits a master region.
+ /// \param MasterOpGen Generator for the statement associated with the given
+ /// master region.
+ virtual void EmitOMPMasterRegion(CodeGenFunction &CGF,
+ const std::function<void()> &MasterOpGen,
+ SourceLocation Loc);
+
+ /// \brief Emits explicit barrier for OpenMP threads.
+ /// \param IsExplicit true if the barrier is explicitly specified.
+ ///
+ virtual void EmitOMPBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
+ bool IsExplicit = true);
+
+ /// \brief Check if the specified \a ScheduleKind is static non-chunked.
+ /// This kind of worksharing directive is emitted without an outer loop.
+ /// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
+ /// \param Chunked True if chunk is specified in the clause.
+ ///
+ virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked) const;
+
+ /// \brief Call the appropriate runtime routine to initialize it before the
+ /// start of the loop.
+ ///
+ /// Depending on the loop schedule, it is necessary to call some runtime
+ /// routine before the start of the OpenMP loop to get the loop upper / lower
+ /// bounds \a LB and \a UB and the stride \a ST.
+ ///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
- /// \param Flags Flags for OpenMP location.
+ /// \param SchedKind Schedule kind, specified by the 'schedule' clause.
+ /// \param IVSize Size of the iteration variable in bits.
+ /// \param IVSigned Sign of the iteration variable.
+ /// \param IL Address of the output variable in which the flag of the
+ /// last iteration is returned.
+ /// \param LB Address of the output variable in which the lower iteration
+ /// number is returned.
+ /// \param UB Address of the output variable in which the upper iteration
+ /// number is returned.
+ /// \param ST Address of the output variable in which the stride value is
+ /// returned; it is needed to generate the static_chunked scheduled loop.
+ /// \param Chunk Value of the chunk for the static_chunked scheduled loop.
+ /// For the default (nullptr) value, a chunk of 1 will be used.
///
- llvm::Value *
- EmitOpenMPUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
- OpenMPLocationFlags Flags = OMP_IDENT_KMPC);
+ virtual void EmitOMPForInit(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind SchedKind,
+ unsigned IVSize, bool IVSigned, llvm::Value *IL,
+ llvm::Value *LB, llvm::Value *UB, llvm::Value *ST,
+ llvm::Value *Chunk = nullptr);
- /// \brief Generates global thread number value.
+ /// \brief Call the appropriate runtime routine to notify that we finished
+ /// all the work with the current loop.
+ ///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
+ /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
///
- llvm::Value *GetOpenMPGlobalThreadNum(CodeGenFunction &CGF,
- SourceLocation Loc);
+ virtual void EmitOMPForFinish(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind ScheduleKind);
- /// \brief Returns pointer to ident_t type;
- llvm::Type *getIdentTyPointerTy();
+ /// \brief Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
+ /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
+ /// clause.
+ /// \param NumThreads The number of threads as an integer value.
+ virtual void EmitOMPNumThreadsClause(CodeGenFunction &CGF,
+ llvm::Value *NumThreads,
+ SourceLocation Loc);
- /// \brief Returns pointer to kmpc_micro type;
- llvm::Type *getKmpc_MicroPointerTy();
+ /// \brief Returns address of the threadprivate variable for the current
+ /// thread.
+ /// \param VD Threadprivate variable.
+ /// \param VDAddr Address of the global variable \a VD.
+ /// \param Loc Location of the reference to threadprivate var.
+ /// \return Address of the threadprivate variable for the current thread.
+ virtual llvm::Value *getOMPAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ llvm::Value *VDAddr,
+ SourceLocation Loc);
- /// \brief Returns specified OpenMP runtime function.
- /// \param Function OpenMP runtime function.
- /// \return Specified function.
- llvm::Constant *CreateRuntimeFunction(OpenMPRTLFunction Function);
+ /// \brief Emit code for the initialization of a threadprivate variable. It
+ /// emits a call to the runtime library which adds the initial value to the
+ /// newly created threadprivate variable (if it is not constant) and registers
+ /// a destructor for the variable (if any).
+ /// \param VD Threadprivate variable.
+ /// \param VDAddr Address of the global variable \a VD.
+ /// \param Loc Location of threadprivate declaration.
+ /// \param PerformInit true if initialization expression is not constant.
+ virtual llvm::Function *
+ EmitOMPThreadPrivateVarDefinition(const VarDecl *VD, llvm::Value *VDAddr,
+ SourceLocation Loc, bool PerformInit,
+ CodeGenFunction *CGF = nullptr);
+
+ /// \brief Emit flush of the variables specified in 'omp flush' directive.
+ /// \param Vars List of variables to flush.
+ virtual void EmitOMPFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
+ SourceLocation Loc);
};
} // namespace CodeGen
} // namespace clang
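
As a hypothetical source-level counterpart (assuming -fopenmp) to the threadprivate machinery declared above -- the per-variable cache, getOMPAddrOfThreadPrivate, and the constructor/destructor registration -- consider a threadprivate global of class type:

struct Counter {
  int hits = 0;
  ~Counter() {}             // non-trivial destruction => destructor registration
};

Counter TLSCounter;
#pragma omp threadprivate(TLSCounter)

void touch() {
  #pragma omp parallel
  {
    TLSCounter.hits += 1;   // each thread updates its own copy
  }
}
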
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index b45fee56ea09..2de0b2f87fc1 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGRECORDLAYOUT_H
-#define CLANG_CODEGEN_CGRECORDLAYOUT_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGRECORDLAYOUT_H
+#define LLVM_CLANG_LIB_CODEGEN_CGRECORDLAYOUT_H
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index a10d8e791b0f..7ad394b5eeb0 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -93,7 +93,7 @@ struct CGRecordLowering {
bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
};
// The constructor.
- CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D);
+ CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
// Short helper routines.
/// \brief Constructs a MemberInfo instance from an offset and llvm::Type *.
MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
@@ -174,7 +174,7 @@ struct CGRecordLowering {
/// padding that is or can potentially be used.
void clipTailPadding();
/// \brief Determines if we need a packed llvm struct.
- void determinePacked();
+ void determinePacked(bool NVBaseType);
/// \brief Inserts padding everwhere it's needed.
void insertPadding();
/// \brief Fills out the structures that are ultimately consumed.
@@ -203,12 +203,12 @@ private:
};
} // namespace {
-CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D)
+CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed)
: Types(Types), Context(Types.getContext()), D(D),
RD(dyn_cast<CXXRecordDecl>(D)),
Layout(Types.getContext().getASTRecordLayout(D)),
DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
- IsZeroInitializableAsBase(true), Packed(false) {}
+ IsZeroInitializableAsBase(true), Packed(Packed) {}
void CGRecordLowering::setBitFieldInfo(
const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
@@ -269,7 +269,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
std::stable_sort(Members.begin(), Members.end());
Members.push_back(StorageInfo(Size, getIntNType(8)));
clipTailPadding();
- determinePacked();
+ determinePacked(NVBaseType);
insertPadding();
Members.pop_back();
calculateZeroInit();
@@ -279,13 +279,11 @@ void CGRecordLowering::lower(bool NVBaseType) {
void CGRecordLowering::lowerUnion() {
CharUnits LayoutSize = Layout.getSize();
llvm::Type *StorageType = nullptr;
- // Compute zero-initializable status.
- if (!D->field_empty() && !isZeroInitializable(*D->field_begin()))
- IsZeroInitializable = IsZeroInitializableAsBase = false;
+ bool SeenNamedMember = false;
// Iterate through the fields setting bitFieldInfo and the Fields array. Also
// locate the "most appropriate" storage type. The heuristic for finding the
// storage type isn't necessary, the first (non-0-length-bitfield) field's
- // type would work fine and be simpler but would be differen than what we've
+ // type would work fine and be simpler but would be different than what we've
// been doing and cause lit tests to change.
for (const auto *Field : D->fields()) {
if (Field->isBitField()) {
@@ -299,6 +297,23 @@ void CGRecordLowering::lowerUnion() {
}
Fields[Field->getCanonicalDecl()] = 0;
llvm::Type *FieldType = getStorageType(Field);
+ // Compute zero-initializable status.
+ // This union might not be zero initialized: it may contain a pointer to
+ // data member which might have some exotic initialization sequence.
+ // If this is the case, then we ought not to try to come up with a "better"
+ // type; it might not be very easy to come up with a Constant which
+ // correctly initializes it.
+ if (!SeenNamedMember && Field->getDeclName()) {
+ SeenNamedMember = true;
+ if (!isZeroInitializable(Field)) {
+ IsZeroInitializable = IsZeroInitializableAsBase = false;
+ StorageType = FieldType;
+ }
+ }
+ // Because our union isn't zero initializable, we won't be getting a better
+ // storage type.
+ if (!IsZeroInitializable)
+ continue;
// Conditionally update our storage type if we've got a new "better" one.
if (!StorageType ||
getAlignment(FieldType) > getAlignment(StorageType) ||
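
A hypothetical case behind the pointer-to-data-member comment above: under the Itanium C++ ABI the null pointer to data member is encoded as -1 rather than 0, so zero-filling the union's storage would not produce a correctly initialized value, and no "better" storage type is chosen.

struct S { int a; };

union U {
  int S::*pm;   // null value is the offset -1, not all-zero bits
  int raw;
};

U u = {};       // must initialize u.pm to the -1 encoding, not to zero bytes
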
@@ -533,8 +548,13 @@ void CGRecordLowering::clipTailPadding() {
}
}
-void CGRecordLowering::determinePacked() {
+void CGRecordLowering::determinePacked(bool NVBaseType) {
+ if (Packed)
+ return;
CharUnits Alignment = CharUnits::One();
+ CharUnits NVAlignment = CharUnits::One();
+ CharUnits NVSize =
+ !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
MemberEnd = Members.end();
Member != MemberEnd; ++Member) {
@@ -544,12 +564,19 @@ void CGRecordLowering::determinePacked() {
// then the entire record must be packed.
if (Member->Offset % getAlignment(Member->Data))
Packed = true;
+ if (Member->Offset < NVSize)
+ NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
Alignment = std::max(Alignment, getAlignment(Member->Data));
}
// If the size of the record (the capstone's offset) is not a multiple of the
// record's alignment, it must be packed.
if (Members.back().Offset % Alignment)
Packed = true;
+ // If the non-virtual sub-object is not a multiple of the non-virtual
+ // sub-object's alignment, it must be packed. We cannot have a packed
+ // non-virtual sub-object and an unpacked complete object or vice versa.
+ if (NVSize % NVAlignment)
+ Packed = true;
// Update the alignment of the sentinal.
if (!Packed)
Members.back().Data = getIntNType(Context.toBits(Alignment));
@@ -641,20 +668,24 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
llvm::StructType *Ty) {
- CGRecordLowering Builder(*this, D);
+ CGRecordLowering Builder(*this, D, /*Packed=*/false);
- Builder.lower(false);
+ Builder.lower(/*NonVirtualBaseType=*/false);
// If we're in C++, compute the base subobject type.
llvm::StructType *BaseTy = nullptr;
if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
BaseTy = Ty;
if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
- CGRecordLowering BaseBuilder(*this, D);
- BaseBuilder.lower(true);
+ CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
+ BaseBuilder.lower(/*NonVirtualBaseType=*/true);
BaseTy = llvm::StructType::create(
getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
addRecordTypeName(D, BaseTy, ".base");
+ // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to work
+ // on both of them with the same index.
+ assert(Builder.Packed == BaseBuilder.Packed &&
+ "Non-virtual and complete types must agree on packedness");
}
}
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index c2b64a7662b6..e3bdf8661ef7 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -185,6 +185,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::OMPForDirectiveClass:
EmitOMPForDirective(cast<OMPForDirective>(*S));
break;
+ case Stmt::OMPForSimdDirectiveClass:
+ EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
+ break;
case Stmt::OMPSectionsDirectiveClass:
EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
break;
@@ -203,6 +206,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::OMPParallelForDirectiveClass:
EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
break;
+ case Stmt::OMPParallelForSimdDirectiveClass:
+ EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
+ break;
case Stmt::OMPParallelSectionsDirectiveClass:
EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
break;
@@ -221,6 +227,18 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::OMPFlushDirectiveClass:
EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
break;
+ case Stmt::OMPOrderedDirectiveClass:
+ EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
+ break;
+ case Stmt::OMPAtomicDirectiveClass:
+ EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
+ break;
+ case Stmt::OMPTargetDirectiveClass:
+ EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
+ break;
+ case Stmt::OMPTeamsDirectiveClass:
+ EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
+ break;
}
}
@@ -547,7 +565,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
if (const Stmt *Else = S.getElse()) {
{
// There is no need to emit line number for unconditional branch.
- SuppressDebugLocation S(Builder);
+ ApplyDebugLocation DL(*this);
EmitBlock(ElseBlock);
}
{
@@ -556,7 +574,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
}
{
// There is no need to emit line number for unconditional branch.
- SuppressDebugLocation S(Builder);
+ ApplyDebugLocation DL(*this);
EmitBranch(ContBlock);
}
}
@@ -567,13 +585,15 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
void CodeGenFunction::EmitCondBrHints(llvm::LLVMContext &Context,
llvm::BranchInst *CondBr,
- const ArrayRef<const Attr *> &Attrs) {
+ ArrayRef<const Attr *> Attrs) {
// Return if there are no hints.
if (Attrs.empty())
return;
// Add vectorize and unroll hints to the metadata on the conditional branch.
- SmallVector<llvm::Value *, 2> Metadata(1);
+ //
+ // FIXME: Should this really start with a size of 1?
+ SmallVector<llvm::Metadata *, 2> Metadata(1);
for (const auto *Attr : Attrs) {
const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr);
@@ -582,8 +602,7 @@ void CodeGenFunction::EmitCondBrHints(llvm::LLVMContext &Context,
continue;
LoopHintAttr::OptionType Option = LH->getOption();
- int ValueInt = LH->getValue();
-
+ LoopHintAttr::LoopHintState State = LH->getState();
const char *MetadataName;
switch (Option) {
case LoopHintAttr::Vectorize:
@@ -595,19 +614,29 @@ void CodeGenFunction::EmitCondBrHints(llvm::LLVMContext &Context,
MetadataName = "llvm.loop.interleave.count";
break;
case LoopHintAttr::Unroll:
- MetadataName = "llvm.loop.unroll.enable";
+ // With the unroll loop hint, a non-zero value indicates full unrolling.
+ MetadataName = State == LoopHintAttr::Disable ? "llvm.loop.unroll.disable"
+ : "llvm.loop.unroll.full";
break;
case LoopHintAttr::UnrollCount:
MetadataName = "llvm.loop.unroll.count";
break;
}
- llvm::Value *Value;
+ Expr *ValueExpr = LH->getValue();
+ int ValueInt = 1;
+ if (ValueExpr) {
+ llvm::APSInt ValueAPS =
+ ValueExpr->EvaluateKnownConstInt(CGM.getContext());
+ ValueInt = static_cast<int>(ValueAPS.getSExtValue());
+ }
+
+ llvm::Constant *Value;
llvm::MDString *Name;
switch (Option) {
case LoopHintAttr::Vectorize:
case LoopHintAttr::Interleave:
- if (ValueInt == 1) {
+ if (State != LoopHintAttr::Disable) {
// FIXME: In the future I will modifiy the behavior of the metadata
// so we can enable/disable vectorization and interleaving separately.
Name = llvm::MDString::get(Context, "llvm.loop.vectorize.enable");
@@ -619,27 +648,26 @@ void CodeGenFunction::EmitCondBrHints(llvm::LLVMContext &Context,
// Fallthrough.
case LoopHintAttr::VectorizeWidth:
case LoopHintAttr::InterleaveCount:
+ case LoopHintAttr::UnrollCount:
Name = llvm::MDString::get(Context, MetadataName);
Value = llvm::ConstantInt::get(Int32Ty, ValueInt);
break;
case LoopHintAttr::Unroll:
Name = llvm::MDString::get(Context, MetadataName);
- Value = (ValueInt == 0) ? Builder.getFalse() : Builder.getTrue();
- break;
- case LoopHintAttr::UnrollCount:
- Name = llvm::MDString::get(Context, MetadataName);
- Value = llvm::ConstantInt::get(Int32Ty, ValueInt);
+ Value = nullptr;
break;
}
- SmallVector<llvm::Value *, 2> OpValues;
+ SmallVector<llvm::Metadata *, 2> OpValues;
OpValues.push_back(Name);
- OpValues.push_back(Value);
+ if (Value)
+ OpValues.push_back(llvm::ConstantAsMetadata::get(Value));
// Set or overwrite metadata indicated by Name.
Metadata.push_back(llvm::MDNode::get(Context, OpValues));
}
+ // FIXME: This condition is never false. Should it be an assert?
if (!Metadata.empty()) {
// Add llvm.loop MDNode to CondBr.
llvm::MDNode *LoopID = llvm::MDNode::get(Context, Metadata);
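
For reference, a hypothetical loop carrying the clang loop-hint pragmas that the code above translates into llvm.loop.* metadata on the loop's conditional branch (vectorize(enable) -> llvm.loop.vectorize.enable, interleave_count -> llvm.loop.interleave.count, unroll_count -> llvm.loop.unroll.count):

void saxpy(float *x, float *y, float a, int n) {
  #pragma clang loop vectorize(enable) interleave_count(2) unroll_count(4)
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}
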
@@ -650,7 +678,7 @@ void CodeGenFunction::EmitCondBrHints(llvm::LLVMContext &Context,
}
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
- const ArrayRef<const Attr *> &WhileAttrs) {
+ ArrayRef<const Attr *> WhileAttrs) {
RegionCounter Cnt = getPGORegionCounter(&S);
// Emit the header for the loop, which will also become
@@ -724,6 +752,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
// Immediately force cleanup.
ConditionScope.ForceCleanup();
+ EmitStopPoint(&S);
// Branch to the loop header again.
EmitBranch(LoopHeader.getBlock());
@@ -739,7 +768,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
}
void CodeGenFunction::EmitDoStmt(const DoStmt &S,
- const ArrayRef<const Attr *> &DoAttrs) {
+ ArrayRef<const Attr *> DoAttrs) {
JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
@@ -800,14 +829,10 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
}
void CodeGenFunction::EmitForStmt(const ForStmt &S,
- const ArrayRef<const Attr *> &ForAttrs) {
+ ArrayRef<const Attr *> ForAttrs) {
JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
- RunCleanupsScope ForScope(*this);
-
- CGDebugInfo *DI = getDebugInfo();
- if (DI)
- DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+ LexicalScope ForScope(*this, S.getSourceRange());
// Evaluate the first part before the loop.
if (S.getInit())
@@ -835,7 +860,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
// Create a cleanup scope for the condition variable cleanups.
- RunCleanupsScope ConditionScope(*this);
+ LexicalScope ConditionScope(*this, S.getSourceRange());
if (S.getCond()) {
// If the for statement has a condition scope, emit the local variable
@@ -891,13 +916,12 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
BreakContinueStack.pop_back();
ConditionScope.ForceCleanup();
+
+ EmitStopPoint(&S);
EmitBranch(CondBlock);
ForScope.ForceCleanup();
- if (DI)
- DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
-
LoopStack.pop();
// Emit the fall-through block.
@@ -906,14 +930,10 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
- const ArrayRef<const Attr *> &ForAttrs) {
+ ArrayRef<const Attr *> ForAttrs) {
JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
- RunCleanupsScope ForScope(*this);
-
- CGDebugInfo *DI = getDebugInfo();
- if (DI)
- DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+ LexicalScope ForScope(*this, S.getSourceRange());
// Evaluate the first pieces before the loop.
EmitStmt(S.getRangeStmt());
@@ -963,11 +983,12 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
{
// Create a separate cleanup scope for the loop variable and body.
- RunCleanupsScope BodyScope(*this);
+ LexicalScope BodyScope(*this, S.getSourceRange());
EmitStmt(S.getLoopVarStmt());
EmitStmt(S.getBody());
}
+ EmitStopPoint(&S);
// If there is an increment, emit it next.
EmitBlock(Continue.getBlock());
EmitStmt(S.getInc());
@@ -978,9 +999,6 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
ForScope.ForceCleanup();
- if (DI)
- DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
-
LoopStack.pop();
// Emit the fall-through block.
@@ -1641,6 +1659,12 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
while (Constraint[1] && Constraint[1] != ',')
Constraint++;
break;
+ case '&':
+ case '%':
+ Result += *Constraint;
+ while (Constraint[1] && Constraint[1] == *Constraint)
+ Constraint++;
+ break;
case ',':
Result += "|";
break;
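
A hypothetical use (x86, GCC-style inline asm) of the constraint modifiers the new '&' and '%' cases pass through to LLVM: '=&r' marks an early-clobber output, and '%' marks its operand and the following one as commutative.

unsigned add2(unsigned a, unsigned b) {
  unsigned out;
  asm("movl %1, %0\n\t"
      "addl %2, %0"
      : "=&r"(out)          // early clobber: written before all inputs are read
      : "%r"(a), "r"(b));   // '%': the compiler may swap a and b
  return out;
}
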
@@ -1751,10 +1775,10 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
/// asm.
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
CodeGenFunction &CGF) {
- SmallVector<llvm::Value *, 8> Locs;
+ SmallVector<llvm::Metadata *, 8> Locs;
// Add the location of the first line to the MDNode.
- Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
- Str->getLocStart().getRawEncoding()));
+ Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ CGF.Int32Ty, Str->getLocStart().getRawEncoding())));
StringRef StrVal = Str->getString();
if (!StrVal.empty()) {
const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
@@ -1766,8 +1790,8 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
if (StrVal[i] != '\n') continue;
SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
CGF.getTarget());
- Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
- LineLoc.getRawEncoding()));
+ Locs.push_back(llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
}
}
@@ -1905,7 +1929,19 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
}
- unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
+ // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
+ // to the return value slot. Only do this when returning in registers.
+ if (isa<MSAsmStmt>(&S)) {
+ const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
+ if (RetAI.isDirect() || RetAI.isExtend()) {
+ // Make a fake lvalue for the return value slot.
+ LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
+ CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
+ *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
+ ResultRegDests, AsmString, S.getNumOutputs());
+ SawAsmBlock = true;
+ }
+ }
for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
const Expr *InputExpr = S.getInputExpr(i);
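
A hypothetical function (x86, MSVC-compatible mode, e.g. clang with -fms-extensions and -fasm-blocks) exercising the new return-register path above: the asm blob leaves its result in EAX, and the added code stores EAX into the return value slot when control falls off the end.

int return_five() {
  __asm {
    mov eax, 5
  }
  // no return statement: the value left in EAX becomes the return value
}
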
@@ -1978,9 +2014,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
StringRef Clobber = S.getClobber(i);
if (Clobber != "memory" && Clobber != "cc")
- Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
+ Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
- if (i != 0 || NumConstraints != 0)
+ if (!Constraints.empty())
Constraints += ',';
Constraints += "~{";
@@ -2018,10 +2054,17 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::Attribute::NoUnwind);
// Slap the source location of the inline asm into a !srcloc metadata on the
- // call. FIXME: Handle metadata for MS-style inline asms.
- if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
+ // call.
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) {
Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
*this));
+ } else {
+ // At least put the line number on MS inline asm blobs.
+ auto Loc = llvm::ConstantInt::get(Int32Ty, S.getAsmLoc().getRawEncoding());
+ Result->setMetadata("srcloc",
+ llvm::MDNode::get(getLLVMContext(),
+ llvm::ConstantAsMetadata::get(Loc)));
+ }
// Extract all of the register value results from the asm.
std::vector<llvm::Value*> RegResults;
@@ -2034,6 +2077,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
}
+ assert(RegResults.size() == ResultRegTypes.size());
+ assert(RegResults.size() == ResultTruncRegTypes.size());
+ assert(RegResults.size() == ResultRegDests.size());
for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
llvm::Value *Tmp = RegResults[i];
@@ -2067,46 +2113,35 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
}
-static LValue InitCapturedStruct(CodeGenFunction &CGF, const CapturedStmt &S) {
+LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
const RecordDecl *RD = S.getCapturedRecordDecl();
- QualType RecordTy = CGF.getContext().getRecordType(RD);
+ QualType RecordTy = getContext().getRecordType(RD);
// Initialize the captured struct.
- LValue SlotLV = CGF.MakeNaturalAlignAddrLValue(
- CGF.CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
+ LValue SlotLV = MakeNaturalAlignAddrLValue(
+ CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
RecordDecl::field_iterator CurField = RD->field_begin();
for (CapturedStmt::capture_init_iterator I = S.capture_init_begin(),
E = S.capture_init_end();
I != E; ++I, ++CurField) {
- LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
- CGF.EmitInitializerForField(*CurField, LV, *I, ArrayRef<VarDecl *>());
+ LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
+ if (CurField->hasCapturedVLAType()) {
+ auto VAT = CurField->getCapturedVLAType();
+ EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
+ } else {
+ EmitInitializerForField(*CurField, LV, *I, None);
+ }
}
return SlotLV;
}
-static void InitVLACaptures(CodeGenFunction &CGF, const CapturedStmt &S) {
- for (auto &C : S.captures()) {
- if (C.capturesVariable()) {
- QualType QTy;
- auto VD = C.getCapturedVar();
- if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
- QTy = PVD->getOriginalType();
- else
- QTy = VD->getType();
- if (QTy->isVariablyModifiedType()) {
- CGF.EmitVariablyModifiedType(QTy);
- }
- }
- }
-}
-
/// Generate an outlined function for the body of a CapturedStmt, store any
/// captured variables into the captured struct, and call the outlined function.
llvm::Function *
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
- LValue CapStruct = InitCapturedStruct(*this, S);
+ LValue CapStruct = InitCapturedStruct(S);
// Emit the CapturedDecl
CodeGenFunction CGF(CGM, true);
@@ -2122,7 +2157,7 @@ CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
llvm::Value *
CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
- LValue CapStruct = InitCapturedStruct(*this, S);
+ LValue CapStruct = InitCapturedStruct(S);
return CapStruct.getAddress();
}
@@ -2163,22 +2198,27 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
// Initialize variable-length arrays.
- InitVLACaptures(*this, S);
+ LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
+ Ctx.getTagDeclType(RD));
+ for (auto *FD : RD->fields()) {
+ if (FD->hasCapturedVLAType()) {
+ auto *ExprArg = EmitLoadOfLValue(EmitLValueForField(Base, FD),
+ S.getLocStart()).getScalarVal();
+ auto VAT = FD->getCapturedVLAType();
+ VLASizeMap[VAT->getSizeExpr()] = ExprArg;
+ }
+ }
// If 'this' is captured, load it into CXXThisValue.
if (CapturedStmtInfo->isCXXThisExprCaptured()) {
FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
- LValue LV = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
- Ctx.getTagDeclType(RD));
- LValue ThisLValue = EmitLValueForField(LV, FD);
+ LValue ThisLValue = EmitLValueForField(Base, FD);
CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
}
PGO.assignRegionCounters(CD, F);
CapturedStmtInfo->EmitBody(*this, CD->getBody());
FinishFunction(CD->getBodyRBrace());
- PGO.emitInstrumentationData();
- PGO.destroyRegionCounters();
return F;
}
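
A hypothetical captured region (C, or C++ with the VLA extension; assumes -fopenmp) that needs the variable-length-array plumbing added above: the size of 'a' must be reloaded from the captured record so that sizeof works inside the outlined body.

#include <cstdio>

void show(int n) {
  int a[n];                 // variably modified type
  for (int i = 0; i < n; ++i)
    a[i] = i;
  #pragma omp parallel
  {
    std::printf("row occupies %zu bytes\n", sizeof a);
  }
}
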
diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp
index 867f41592dd9..78fd37ce6562 100644
--- a/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/lib/CodeGen/CGStmtOpenMP.cpp
@@ -14,43 +14,411 @@
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;
+namespace {
+/// \brief RAII for emitting code of CapturedStmt without function outlining.
+class InlinedOpenMPRegion {
+ CodeGenFunction &CGF;
+ CodeGenFunction::CGCapturedStmtInfo *PrevCapturedStmtInfo;
+ const Decl *StoredCurCodeDecl;
+
+ /// \brief A class to emit CapturedStmt construct as inlined statement without
+ /// generating a function for outlined code.
+ class CGInlinedOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
+ public:
+ CGInlinedOpenMPRegionInfo() : CGCapturedStmtInfo() {}
+ };
+
+public:
+ InlinedOpenMPRegion(CodeGenFunction &CGF, const Stmt *S)
+ : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo),
+ StoredCurCodeDecl(CGF.CurCodeDecl) {
+ CGF.CurCodeDecl = cast<CapturedStmt>(S)->getCapturedDecl();
+ CGF.CapturedStmtInfo = new CGInlinedOpenMPRegionInfo();
+ }
+ ~InlinedOpenMPRegion() {
+ delete CGF.CapturedStmtInfo;
+ CGF.CapturedStmtInfo = PrevCapturedStmtInfo;
+ CGF.CurCodeDecl = StoredCurCodeDecl;
+ }
+};
+} // namespace
+
//===----------------------------------------------------------------------===//
// OpenMP Directive Emission
//===----------------------------------------------------------------------===//
-void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
- const CapturedStmt *CS = cast<CapturedStmt>(S.getAssociatedStmt());
- llvm::Value *CapturedStruct = GenerateCapturedStmtArgument(*CS);
+/// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen
+/// function. Here is the logic:
+/// if (Cond) {
+/// CodeGen(true);
+/// } else {
+/// CodeGen(false);
+/// }
+static void EmitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
+ const std::function<void(bool)> &CodeGen) {
+ CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
- llvm::Value *OutlinedFn;
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm of the if/else.
+ bool CondConstant;
+ if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
+ CodeGen(CondConstant);
+ return;
+ }
+
+ // Otherwise, the condition did not fold, or we couldn't elide it. Just
+ // emit the conditional branch.
+ auto ThenBlock = CGF.createBasicBlock(/*name*/ "omp_if.then");
+ auto ElseBlock = CGF.createBasicBlock(/*name*/ "omp_if.else");
+ auto ContBlock = CGF.createBasicBlock(/*name*/ "omp_if.end");
+ CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount*/ 0);
+
+ // Emit the 'then' code.
+ CGF.EmitBlock(ThenBlock);
+ CodeGen(/*ThenBlock*/ true);
+ CGF.EmitBranch(ContBlock);
+ // Emit the 'else' code if present.
+ {
+ // There is no need to emit line number for unconditional branch.
+ ApplyDebugLocation DL(CGF);
+ CGF.EmitBlock(ElseBlock);
+ }
+ CodeGen(/*ThenBlock*/ false);
{
- CodeGenFunction CGF(CGM, true);
- CGCapturedStmtInfo CGInfo(*CS, CS->getCapturedRegionKind());
- CGF.CapturedStmtInfo = &CGInfo;
- OutlinedFn = CGF.GenerateCapturedStmtFunction(*CS);
+ // There is no need to emit line number for unconditional branch.
+ ApplyDebugLocation DL(CGF);
+ CGF.EmitBranch(ContBlock);
+ }
+ // Emit the continuation block for code after the if.
+ CGF.EmitBlock(ContBlock, /*IsFinished*/ true);
+}
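
Illustrative source (assuming -fopenmp) for the helper above: an 'if' clause on a parallel region, where a false condition selects the serial-call branch.

#include <cstdio>

void report(int n) {
  #pragma omp parallel if(n > 1)
  {
    std::printf("processing %d item(s)\n", n);
  }
}
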
+
+void CodeGenFunction::EmitOMPAggregateAssign(LValue OriginalAddr,
+ llvm::Value *PrivateAddr,
+ const Expr *AssignExpr,
+ QualType OriginalType,
+ const VarDecl *VDInit) {
+ EmitBlock(createBasicBlock(".omp.assign.begin."));
+ if (!isa<CXXConstructExpr>(AssignExpr) || isTrivialInitializer(AssignExpr)) {
+ // Perform simple memcpy.
+ EmitAggregateAssign(PrivateAddr, OriginalAddr.getAddress(),
+ AssignExpr->getType());
+ } else {
+ // Perform element-by-element initialization.
+ QualType ElementTy;
+ auto SrcBegin = OriginalAddr.getAddress();
+ auto DestBegin = PrivateAddr;
+ auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
+ auto SrcNumElements = emitArrayLength(ArrayTy, ElementTy, SrcBegin);
+ auto DestNumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin);
+ auto SrcEnd = Builder.CreateGEP(SrcBegin, SrcNumElements);
+ auto DestEnd = Builder.CreateGEP(DestBegin, DestNumElements);
+ // The basic structure here is a do-while loop, because we don't
+ // need to check for the zero-element case.
+ auto BodyBB = createBasicBlock("omp.arraycpy.body");
+ auto DoneBB = createBasicBlock("omp.arraycpy.done");
+ auto IsEmpty =
+ Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
+ Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
+
+ // Enter the loop body, making that address the current address.
+ auto EntryBB = Builder.GetInsertBlock();
+ EmitBlock(BodyBB);
+ auto SrcElementPast = Builder.CreatePHI(SrcBegin->getType(), 2,
+ "omp.arraycpy.srcElementPast");
+ SrcElementPast->addIncoming(SrcEnd, EntryBB);
+ auto DestElementPast = Builder.CreatePHI(DestBegin->getType(), 2,
+ "omp.arraycpy.destElementPast");
+ DestElementPast->addIncoming(DestEnd, EntryBB);
+
+ // Shift the address back by one element.
+ auto NegativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
+ auto DestElement = Builder.CreateGEP(DestElementPast, NegativeOne,
+ "omp.arraycpy.dest.element");
+ auto SrcElement = Builder.CreateGEP(SrcElementPast, NegativeOne,
+ "omp.arraycpy.src.element");
+ {
+ // Create a RunCleanupsScope to clean up possible temporaries.
+ CodeGenFunction::RunCleanupsScope Init(*this);
+ // Emit initialization for single element.
+ LocalDeclMap[VDInit] = SrcElement;
+ EmitAnyExprToMem(AssignExpr, DestElement,
+ AssignExpr->getType().getQualifiers(),
+ /*IsInitializer*/ false);
+ LocalDeclMap.erase(VDInit);
+ }
+
+ // Check whether we've reached the end.
+ auto Done =
+ Builder.CreateICmpEQ(DestElement, DestBegin, "omp.arraycpy.done");
+ Builder.CreateCondBr(Done, DoneBB, BodyBB);
+ DestElementPast->addIncoming(DestElement, Builder.GetInsertBlock());
+ SrcElementPast->addIncoming(SrcElement, Builder.GetInsertBlock());
+
+ // Done.
+ EmitBlock(DoneBB, true);
+ }
+ EmitBlock(createBasicBlock(".omp.assign.end."));
+}
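
As a rough C++ rendering (illustrative only) of the element-by-element branch above: walk both arrays backwards from one past the end, initialize each destination element from the matching source element, and stop once the destination cursor reaches the beginning.

template <typename T, typename InitFn>
void arraycpy_sketch(T *DestBegin, T *DestEnd, T *SrcEnd, InitFn Init) {
  if (DestBegin == DestEnd)             // "omp.arraycpy.isempty"
    return;
  T *DestPast = DestEnd, *SrcPast = SrcEnd;
  do {                                  // "omp.arraycpy.body"
    T *Dest = DestPast - 1;             // shift back by one element
    T *Src = SrcPast - 1;
    Init(Dest, Src);                    // per-element init (the AssignExpr)
    DestPast = Dest;
    SrcPast = Src;
  } while (DestPast != DestBegin);      // "omp.arraycpy.done"
}
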
+
+void CodeGenFunction::EmitOMPFirstprivateClause(
+ const OMPExecutableDirective &D,
+ CodeGenFunction::OMPPrivateScope &PrivateScope) {
+ auto PrivateFilter = [](const OMPClause *C) -> bool {
+ return C->getClauseKind() == OMPC_firstprivate;
+ };
+ for (OMPExecutableDirective::filtered_clause_iterator<decltype(PrivateFilter)>
+ I(D.clauses(), PrivateFilter); I; ++I) {
+ auto *C = cast<OMPFirstprivateClause>(*I);
+ auto IRef = C->varlist_begin();
+ auto InitsRef = C->inits().begin();
+ for (auto IInit : C->private_copies()) {
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
+ bool IsRegistered;
+ if (*InitsRef != nullptr) {
+ // Emit VarDecl with copy init for arrays.
+ auto *FD = CapturedStmtInfo->lookup(OrigVD);
+ LValue Base = MakeNaturalAlignAddrLValue(
+ CapturedStmtInfo->getContextValue(),
+ getContext().getTagDeclType(FD->getParent()));
+ auto OriginalAddr = EmitLValueForField(Base, FD);
+ auto VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
+ IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value * {
+ auto Emission = EmitAutoVarAlloca(*VD);
+ // Emit initialization of aggregate firstprivate vars.
+ EmitOMPAggregateAssign(OriginalAddr, Emission.getAllocatedAddress(),
+ VD->getInit(), (*IRef)->getType(), VDInit);
+ EmitAutoVarCleanups(Emission);
+ return Emission.getAllocatedAddress();
+ });
+ } else
+ IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value * {
+ // Emit private VarDecl with copy init.
+ EmitDecl(*VD);
+ return GetAddrOfLocalVar(VD);
+ });
+ assert(IsRegistered && "counter already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ ++IRef, ++InitsRef;
+ }
+ }
+}
+
+void CodeGenFunction::EmitOMPPrivateClause(
+ const OMPExecutableDirective &D,
+ CodeGenFunction::OMPPrivateScope &PrivateScope) {
+ auto PrivateFilter = [](const OMPClause *C) -> bool {
+ return C->getClauseKind() == OMPC_private;
+ };
+ for (OMPExecutableDirective::filtered_clause_iterator<decltype(PrivateFilter)>
+ I(D.clauses(), PrivateFilter); I; ++I) {
+ auto *C = cast<OMPPrivateClause>(*I);
+ auto IRef = C->varlist_begin();
+ for (auto IInit : C->private_copies()) {
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
+ bool IsRegistered =
+ PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value * {
+ // Emit private VarDecl with copy init.
+ EmitDecl(*VD);
+ return GetAddrOfLocalVar(VD);
+ });
+ assert(IsRegistered && "counter already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ ++IRef;
+ }
+ }
+}
+
+/// \brief Emits code for OpenMP parallel directive in the parallel region.
+static void EmitOMPParallelCall(CodeGenFunction &CGF,
+ const OMPParallelDirective &S,
+ llvm::Value *OutlinedFn,
+ llvm::Value *CapturedStruct) {
+ if (auto C = S.getSingleClause(/*K*/ OMPC_num_threads)) {
+ CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
+ auto NumThreadsClause = cast<OMPNumThreadsClause>(C);
+ auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
+ /*IgnoreResultAssign*/ true);
+ CGF.CGM.getOpenMPRuntime().EmitOMPNumThreadsClause(
+ CGF, NumThreads, NumThreadsClause->getLocStart());
+ }
+ CGF.CGM.getOpenMPRuntime().EmitOMPParallelCall(CGF, S.getLocStart(),
+ OutlinedFn, CapturedStruct);
+}
+
+void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
+ auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
+ auto OutlinedFn = CGM.getOpenMPRuntime().EmitOpenMPOutlinedFunction(
+ S, *CS->getCapturedDecl()->param_begin());
+ if (auto C = S.getSingleClause(/*K*/ OMPC_if)) {
+ auto Cond = cast<OMPIfClause>(C)->getCondition();
+ EmitOMPIfClause(*this, Cond, [&](bool ThenBlock) {
+ if (ThenBlock)
+ EmitOMPParallelCall(*this, S, OutlinedFn, CapturedStruct);
+ else
+ CGM.getOpenMPRuntime().EmitOMPSerialCall(*this, S.getLocStart(),
+ OutlinedFn, CapturedStruct);
+ });
+ } else
+ EmitOMPParallelCall(*this, S, OutlinedFn, CapturedStruct);
+}
+
+void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &S,
+ bool SeparateIter) {
+ RunCleanupsScope BodyScope(*this);
+ // Update counters values on current iteration.
+ for (auto I : S.updates()) {
+ EmitIgnoredExpr(I);
+ }
+ // On a continue in the body, jump to the end.
+ auto Continue = getJumpDestInCurrentScope("omp.body.continue");
+ BreakContinueStack.push_back(BreakContinue(JumpDest(), Continue));
+ // Emit loop body.
+ EmitStmt(S.getBody());
+ // The end (updates/cleanups).
+ EmitBlock(Continue.getBlock());
+ BreakContinueStack.pop_back();
+ if (SeparateIter) {
+ // TODO: Update lastprivates if the SeparateIter flag is true.
+ // This will be implemented in a follow-up OMPLastprivateClause patch, but
+ // the result should still be correct without it, as we do not make these
+ // variables private yet.
+ }
+}
+
+void CodeGenFunction::EmitOMPInnerLoop(const OMPLoopDirective &S,
+ OMPPrivateScope &LoopScope,
+ bool SeparateIter) {
+ auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
+ auto Cnt = getPGORegionCounter(&S);
+
+ // Start the loop with a block that tests the condition.
+ auto CondBlock = createBasicBlock("omp.inner.for.cond");
+ EmitBlock(CondBlock);
+ LoopStack.push(CondBlock);
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ auto ExitBlock = LoopExit.getBlock();
+ if (LoopScope.requiresCleanups())
+ ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
+
+ auto LoopBody = createBasicBlock("omp.inner.for.body");
+
+ // Emit condition: "IV < LastIteration + 1 [ - 1]"
+ // ("- 1" when lastprivate clause is present - separate one iteration).
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond(SeparateIter));
+ Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock,
+ PGO.createLoopWeights(S.getCond(SeparateIter), Cnt));
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+
+ EmitBlock(LoopBody);
+ Cnt.beginRegion(Builder);
+
+ // Create a block for the increment.
+ auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
+ EmitOMPLoopBody(S);
+ EmitStopPoint(&S);
+
+ // Emit "IV = IV + 1" and a back-edge to the condition block.
+ EmitBlock(Continue.getBlock());
+ EmitIgnoredExpr(S.getInc());
+ BreakContinueStack.pop_back();
+ EmitBranch(CondBlock);
+ LoopStack.pop();
+ // Emit the fall-through block.
+ EmitBlock(LoopExit.getBlock());
+}
+
+void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &S) {
+ auto IC = S.counters().begin();
+ for (auto F : S.finals()) {
+ if (LocalDeclMap.lookup(cast<DeclRefExpr>((*IC))->getDecl())) {
+ EmitIgnoredExpr(F);
+ }
+ ++IC;
}
+}
- // Build call __kmpc_fork_call(loc, 1, microtask, captured_struct/*context*/)
- llvm::Value *Args[] = {
- CGM.getOpenMPRuntime().EmitOpenMPUpdateLocation(*this, S.getLocStart()),
- Builder.getInt32(1), // Number of arguments after 'microtask' argument
- // (there is only one additional argument - 'context')
- Builder.CreateBitCast(OutlinedFn,
- CGM.getOpenMPRuntime().getKmpc_MicroPointerTy()),
- EmitCastToVoidPtr(CapturedStruct)};
- llvm::Constant *RTLFn = CGM.getOpenMPRuntime().CreateRuntimeFunction(
- CGOpenMPRuntime::OMPRTL__kmpc_fork_call);
- EmitRuntimeCall(RTLFn, Args);
+static void EmitOMPAlignedClause(CodeGenFunction &CGF, CodeGenModule &CGM,
+ const OMPAlignedClause &Clause) {
+ unsigned ClauseAlignment = 0;
+ if (auto AlignmentExpr = Clause.getAlignment()) {
+ auto AlignmentCI =
+ cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
+ ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
+ }
+ for (auto E : Clause.varlists()) {
+ unsigned Alignment = ClauseAlignment;
+ if (Alignment == 0) {
+ // OpenMP [2.8.1, Description]
+ // If no optional parameter is specified, implementation-defined default
+ // alignments for SIMD instructions on the target platforms are assumed.
+ Alignment = CGM.getTargetCodeGenInfo().getOpenMPSimdDefaultAlignment(
+ E->getType());
+ }
+ assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
+ "alignment is not power of 2");
+ if (Alignment != 0) {
+ llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
+ CGF.EmitAlignmentAssumption(PtrValue, Alignment);
+ }
+ }
+}
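
Illustrative source (assuming -fopenmp) for the helper above: an 'aligned' clause on 'omp simd', which becomes an alignment assumption on the listed pointer; without an explicit alignment, the target's default SIMD alignment is used.

void scale(float *p, int n) {
  #pragma omp simd aligned(p : 32)
  for (int i = 0; i < n; ++i)
    p[i] *= 2.0f;
}
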
+
+static void EmitPrivateLoopCounters(CodeGenFunction &CGF,
+ CodeGenFunction::OMPPrivateScope &LoopScope,
+ ArrayRef<Expr *> Counters) {
+ for (auto *E : Counters) {
+ auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ bool IsRegistered = LoopScope.addPrivate(VD, [&]() -> llvm::Value * {
+ // Emit var without initialization.
+ auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
+ CGF.EmitAutoVarCleanups(VarEmission);
+ return VarEmission.getAllocatedAddress();
+ });
+ assert(IsRegistered && "counter already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ }
+ (void)LoopScope.Privatize();
}
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
- const CapturedStmt *CS = cast<CapturedStmt>(S.getAssociatedStmt());
- const Stmt *Body = CS->getCapturedStmt();
+ // Pragma 'simd' code depends on the presence of a 'lastprivate' clause.
+ // If present, we have to separate last iteration of the loop:
+ //
+ // if (LastIteration != 0) {
+ // for (IV in 0..LastIteration-1) BODY;
+ // BODY with updates of lastprivate vars;
+ // <Final counter/linear vars updates>;
+ // }
+ //
+ // otherwise (when there's no lastprivate):
+ //
+ // for (IV in 0..LastIteration) BODY;
+ // <Final counter/linear vars updates>;
+ //
+
+ // Walk clauses and process safelen/lastprivate.
+ bool SeparateIter = false;
LoopStack.setParallel();
LoopStack.setVectorizerEnable(true);
for (auto C : S.clauses()) {
@@ -66,16 +434,181 @@ void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
LoopStack.setParallel(false);
break;
}
+ case OMPC_aligned:
+ EmitOMPAlignedClause(*this, CGM, cast<OMPAlignedClause>(*C));
+ break;
+ case OMPC_lastprivate:
+ SeparateIter = true;
+ break;
default:
// Not handled yet
;
}
}
- EmitStmt(Body);
+
+ InlinedOpenMPRegion Region(*this, S.getAssociatedStmt());
+ RunCleanupsScope DirectiveScope(*this);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+
+ // Emit the loop iteration variable.
+ const Expr *IVExpr = S.getIterationVariable();
+ const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
+ EmitVarDecl(*IVDecl);
+ EmitIgnoredExpr(S.getInit());
+
+ // Emit the iterations count variable.
+ // If it is not a variable, Sema decided to calculate iterations count on each
+ // iteration (e.g., it is foldable into a constant).
+ if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
+ EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
+ // Emit calculation of the iterations count.
+ EmitIgnoredExpr(S.getCalcLastIteration());
+ }
+
+ if (SeparateIter) {
+ // Emit: if (LastIteration > 0) - begin.
+ RegionCounter Cnt = getPGORegionCounter(&S);
+ auto ThenBlock = createBasicBlock("simd.if.then");
+ auto ContBlock = createBasicBlock("simd.if.end");
+ EmitBranchOnBoolExpr(S.getPreCond(), ThenBlock, ContBlock, Cnt.getCount());
+ EmitBlock(ThenBlock);
+ Cnt.beginRegion(Builder);
+ // Emit 'then' code.
+ {
+ OMPPrivateScope LoopScope(*this);
+ EmitPrivateLoopCounters(*this, LoopScope, S.counters());
+ EmitOMPInnerLoop(S, LoopScope, /* SeparateIter */ true);
+ EmitOMPLoopBody(S, /* SeparateIter */ true);
+ }
+ EmitOMPSimdFinal(S);
+ // Emit: if (LastIteration != 0) - end.
+ EmitBranch(ContBlock);
+ EmitBlock(ContBlock, true);
+ } else {
+ {
+ OMPPrivateScope LoopScope(*this);
+ EmitPrivateLoopCounters(*this, LoopScope, S.counters());
+ EmitOMPInnerLoop(S, LoopScope);
+ }
+ EmitOMPSimdFinal(S);
+ }
+
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
}
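
Illustrative source (assuming -fopenmp) for the separated-last-iteration path above: a 'lastprivate' clause on 'omp simd', so the final value must come from the sequentially last iteration.

int last_value(const int *v, int n) {
  int last = 0;
  #pragma omp simd lastprivate(last)
  for (int i = 0; i < n; ++i)
    last = v[i];
  return last;
}
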
-void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &) {
- llvm_unreachable("CodeGen for 'omp for' is not supported yet.");
+/// \brief Emit a helper variable and return corresponding lvalue.
+static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
+ const DeclRefExpr *Helper) {
+ auto VDecl = cast<VarDecl>(Helper->getDecl());
+ CGF.EmitVarDecl(*VDecl);
+ return CGF.EmitLValue(Helper);
+}
+
+void CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
+ // Emit the loop iteration variable.
+ auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
+ auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
+ EmitVarDecl(*IVDecl);
+
+ // Emit the iterations count variable.
+ // If it is not a variable, Sema decided to calculate iterations count on each
+ // iteration (e.g., it is foldable into a constant).
+ if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
+ EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
+ // Emit calculation of the iterations count.
+ EmitIgnoredExpr(S.getCalcLastIteration());
+ }
+
+ auto &RT = CGM.getOpenMPRuntime();
+
+ // Check pre-condition.
+ {
+ // Skip the entire loop if we don't meet the precondition.
+ RegionCounter Cnt = getPGORegionCounter(&S);
+ auto ThenBlock = createBasicBlock("omp.precond.then");
+ auto ContBlock = createBasicBlock("omp.precond.end");
+ EmitBranchOnBoolExpr(S.getPreCond(), ThenBlock, ContBlock, Cnt.getCount());
+ EmitBlock(ThenBlock);
+ Cnt.beginRegion(Builder);
+ // Emit 'then' code.
+ {
+ // Emit helper vars inits.
+ LValue LB =
+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
+ LValue UB =
+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
+ LValue ST =
+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
+ LValue IL =
+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
+
+ OMPPrivateScope LoopScope(*this);
+ EmitPrivateLoopCounters(*this, LoopScope, S.counters());
+
+ // Detect the loop schedule kind and chunk.
+ auto ScheduleKind = OMPC_SCHEDULE_unknown;
+ llvm::Value *Chunk = nullptr;
+ if (auto C = cast_or_null<OMPScheduleClause>(
+ S.getSingleClause(OMPC_schedule))) {
+ ScheduleKind = C->getScheduleKind();
+ if (auto Ch = C->getChunkSize()) {
+ Chunk = EmitScalarExpr(Ch);
+ Chunk = EmitScalarConversion(Chunk, Ch->getType(),
+ S.getIterationVariable()->getType());
+ }
+ }
+ const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
+ const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
+ if (RT.isStaticNonchunked(ScheduleKind,
+ /* Chunked */ Chunk != nullptr)) {
+ // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
+ // When no chunk_size is specified, the iteration space is divided into
+ // chunks that are approximately equal in size, and at most one chunk is
+ // distributed to each thread. Note that the size of the chunks is
+ // unspecified in this case.
+ RT.EmitOMPForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
+ IL.getAddress(), LB.getAddress(), UB.getAddress(),
+ ST.getAddress());
+ // UB = min(UB, GlobalUB);
+ EmitIgnoredExpr(S.getEnsureUpperBound());
+ // IV = LB;
+ EmitIgnoredExpr(S.getInit());
+ // while (idx <= UB) { BODY; ++idx; }
+ EmitOMPInnerLoop(S, LoopScope);
+ // Tell the runtime we are done.
+ RT.EmitOMPForFinish(*this, S.getLocStart(), ScheduleKind);
+ } else
+ ErrorUnsupported(&S, "OpenMP loop with requested schedule");
+ }
+ // We're now done with the loop, so jump to the continuation block.
+ EmitBranch(ContBlock);
+ EmitBlock(ContBlock, true);
+ }
+}
+
+void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
+ InlinedOpenMPRegion Region(*this, S.getAssociatedStmt());
+ RunCleanupsScope DirectiveScope(*this);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+
+ EmitOMPWorksharingLoop(S);
+
+ // Emit an implicit barrier at the end.
+ CGM.getOpenMPRuntime().EmitOMPBarrierCall(*this, S.getLocStart(),
+ /*IsExplicit*/ false);
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
+}
+
+void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &) {
+ llvm_unreachable("CodeGen for 'omp for simd' is not supported yet.");
}
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &) {
@@ -90,12 +623,24 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &) {
llvm_unreachable("CodeGen for 'omp single' is not supported yet.");
}
-void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &) {
- llvm_unreachable("CodeGen for 'omp master' is not supported yet.");
+void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
+ CGM.getOpenMPRuntime().EmitOMPMasterRegion(*this, [&]() -> void {
+ InlinedOpenMPRegion Region(*this, S.getAssociatedStmt());
+ RunCleanupsScope Scope(*this);
+ EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ EnsureInsertPoint();
+ }, S.getLocStart());
}
-void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &) {
- llvm_unreachable("CodeGen for 'omp critical' is not supported yet.");
+void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
+ CGM.getOpenMPRuntime().EmitOMPCriticalRegion(
+ *this, S.getDirectiveName().getAsString(), [&]() -> void {
+ InlinedOpenMPRegion Region(*this, S.getAssociatedStmt());
+ RunCleanupsScope Scope(*this);
+ EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ EnsureInsertPoint();
+ }, S.getLocStart());
}
void
@@ -103,6 +648,11 @@ CodeGenFunction::EmitOMPParallelForDirective(const OMPParallelForDirective &) {
llvm_unreachable("CodeGen for 'omp parallel for' is not supported yet.");
}
+void CodeGenFunction::EmitOMPParallelForSimdDirective(
+ const OMPParallelForSimdDirective &) {
+ llvm_unreachable("CodeGen for 'omp parallel for simd' is not supported yet.");
+}
+
void CodeGenFunction::EmitOMPParallelSectionsDirective(
const OMPParallelSectionsDirective &) {
llvm_unreachable("CodeGen for 'omp parallel sections' is not supported yet.");
@@ -116,15 +666,40 @@ void CodeGenFunction::EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &) {
llvm_unreachable("CodeGen for 'omp taskyield' is not supported yet.");
}
-void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &) {
- llvm_unreachable("CodeGen for 'omp barrier' is not supported yet.");
+void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
+ CGM.getOpenMPRuntime().EmitOMPBarrierCall(*this, S.getLocStart());
}
void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &) {
llvm_unreachable("CodeGen for 'omp taskwait' is not supported yet.");
}
-void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &) {
- llvm_unreachable("CodeGen for 'omp flush' is not supported yet.");
+void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
+ CGM.getOpenMPRuntime().EmitOMPFlush(
+ *this, [&]() -> ArrayRef<const Expr *> {
+ if (auto C = S.getSingleClause(/*K*/ OMPC_flush)) {
+ auto FlushClause = cast<OMPFlushClause>(C);
+ return llvm::makeArrayRef(FlushClause->varlist_begin(),
+ FlushClause->varlist_end());
+ }
+ return llvm::None;
+ }(),
+ S.getLocStart());
+}
+
+void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &) {
+ llvm_unreachable("CodeGen for 'omp ordered' is not supported yet.");
+}
+
+void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &) {
+ llvm_unreachable("CodeGen for 'omp atomic' is not supported yet.");
+}
+
+void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) {
+ llvm_unreachable("CodeGen for 'omp target' is not supported yet.");
+}
+
+void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
+ llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
}
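
For orientation, the stubs replaced in this file now lower real OpenMP constructs. A minimal sketch of source that exercises the new paths (illustrative only, not part of the patch; the function and names are made up):

   // schedule(static) with no chunk takes the RT.isStaticNonchunked() fast
   // path in EmitOMPWorksharingLoop above; the other pragmas map to the new
   // directive emitters as annotated.
   void saxpy(int n, float a, float *x, float *y) {
   #pragma omp parallel
     {
   #pragma omp for schedule(static)   // EmitOMPForDirective
       for (int i = 0; i < n; ++i)
         y[i] = a * x[i] + y[i];
   #pragma omp master                 // EmitOMPMasterDirective
       { y[0] += 0.0f; }
   #pragma omp barrier                // EmitOMPBarrierDirective
   #pragma omp critical(update)       // EmitOMPCriticalDirective
       y[0] += 1.0f;
   #pragma omp flush(y)               // EmitOMPFlushDirective
     }
   }
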
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 0df2c43d11b5..acb2a56fab30 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -48,7 +48,7 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true,
- /*DontDefer*/ true);
+ /*DontDefer=*/true, /*IsThunk=*/true);
}
static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
@@ -159,14 +159,10 @@ void CodeGenFunction::GenerateVarArgsThunk(
// with "this".
llvm::Value *ThisPtr = &*AI;
llvm::BasicBlock *EntryBB = Fn->begin();
- llvm::Instruction *ThisStore = nullptr;
- for (llvm::BasicBlock::iterator I = EntryBB->begin(), E = EntryBB->end();
- I != E; I++) {
- if (isa<llvm::StoreInst>(I) && I->getOperand(0) == ThisPtr) {
- ThisStore = cast<llvm::StoreInst>(I);
- break;
- }
- }
+ llvm::Instruction *ThisStore =
+ std::find_if(EntryBB->begin(), EntryBB->end(), [&](llvm::Instruction &I) {
+ return isa<llvm::StoreInst>(I) && I.getOperand(0) == ThisPtr;
+ });
assert(ThisStore && "Store of this should be in entry block?");
// Adjust "this", if necessary.
Builder.SetInsertPoint(ThisStore);
@@ -194,60 +190,71 @@ void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
const CGFunctionInfo &FnInfo) {
assert(!CurGD.getDecl() && "CurGD was already set!");
CurGD = GD;
+ CurFuncIsThunk = true;
// Build FunctionArgs.
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
QualType ThisType = MD->getThisType(getContext());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- QualType ResultType =
- CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getReturnType();
+ QualType ResultType = CGM.getCXXABI().HasThisReturn(GD)
+ ? ThisType
+ : CGM.getCXXABI().hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : FPT->getReturnType();
FunctionArgList FunctionArgs;
// Create the implicit 'this' parameter declaration.
CGM.getCXXABI().buildThisParam(*this, FunctionArgs);
// Add the rest of the parameters.
- for (FunctionDecl::param_const_iterator I = MD->param_begin(),
- E = MD->param_end();
- I != E; ++I)
- FunctionArgs.push_back(*I);
+ FunctionArgs.append(MD->param_begin(), MD->param_end());
if (isa<CXXDestructorDecl>(MD))
CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs);
// Start defining the function.
StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
- MD->getLocation(), SourceLocation());
+ MD->getLocation(), MD->getLocation());
// Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
CXXThisValue = CXXABIThisValue;
}
-void CodeGenFunction::EmitCallAndReturnForThunk(GlobalDecl GD,
- llvm::Value *Callee,
+void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
const ThunkInfo *Thunk) {
assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
"Please use a new CGF for this thunk");
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());
// Adjust the 'this' pointer if necessary
llvm::Value *AdjustedThisPtr = Thunk ? CGM.getCXXABI().performThisAdjustment(
*this, LoadCXXThis(), Thunk->This)
: LoadCXXThis();
+ if (CurFnInfo->usesInAlloca()) {
+ // We don't handle return adjusting thunks, because they require us to call
+ // the copy constructor. For now, fall through and pretend the return
+ // adjustment was empty so we don't crash.
+ if (Thunk && !Thunk->Return.isEmpty()) {
+ CGM.ErrorUnsupported(
+ MD, "non-trivial argument copy for return-adjusting thunk");
+ }
+ EmitMustTailThunk(MD, AdjustedThisPtr, Callee);
+ return;
+ }
+
// Start building CallArgs.
CallArgList CallArgs;
QualType ThisType = MD->getThisType(getContext());
CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);
if (isa<CXXDestructorDecl>(MD))
- CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, GD, CallArgs);
+ CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, CurGD, CallArgs);
// Add the rest of the arguments.
- for (FunctionDecl::param_const_iterator I = MD->param_begin(),
- E = MD->param_end(); I != E; ++I)
- EmitDelegateCallArg(CallArgs, *I, (*I)->getLocStart());
+ for (const ParmVarDecl *PD : MD->params())
+ EmitDelegateCallArg(CallArgs, PD, PD->getLocStart());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
@@ -270,8 +277,11 @@ void CodeGenFunction::EmitCallAndReturnForThunk(GlobalDecl GD,
#endif
// Determine whether we have a return value slot to use.
- QualType ResultType =
- CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getReturnType();
+ QualType ResultType = CGM.getCXXABI().HasThisReturn(CurGD)
+ ? ThisType
+ : CGM.getCXXABI().hasMostDerivedReturn(CurGD)
+ ? CGM.getContext().VoidPtrTy
+ : FPT->getReturnType();
ReturnValueSlot Slot;
if (!ResultType->isVoidType() &&
CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
@@ -279,8 +289,9 @@ void CodeGenFunction::EmitCallAndReturnForThunk(GlobalDecl GD,
Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
// Now emit our call.
- RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD);
-
+ llvm::Instruction *CallOrInvoke;
+ RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD, &CallOrInvoke);
+
// Consider return adjustment if we have ThunkInfo.
if (Thunk && !Thunk->Return.isEmpty())
RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);
@@ -295,6 +306,62 @@ void CodeGenFunction::EmitCallAndReturnForThunk(GlobalDecl GD,
FinishFunction();
}
+void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
+ llvm::Value *AdjustedThisPtr,
+ llvm::Value *Callee) {
+ // Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery
+ // to translate AST arguments into LLVM IR arguments. For thunks, we know
+ // that the caller prototype more or less matches the callee prototype with
+ // the exception of 'this'.
+ SmallVector<llvm::Value *, 8> Args;
+ for (llvm::Argument &A : CurFn->args())
+ Args.push_back(&A);
+
+ // Set the adjusted 'this' pointer.
+ const ABIArgInfo &ThisAI = CurFnInfo->arg_begin()->info;
+ if (ThisAI.isDirect()) {
+ const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
+ int ThisArgNo = RetAI.isIndirect() && !RetAI.isSRetAfterThis() ? 1 : 0;
+ llvm::Type *ThisType = Args[ThisArgNo]->getType();
+ if (ThisType != AdjustedThisPtr->getType())
+ AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
+ Args[ThisArgNo] = AdjustedThisPtr;
+ } else {
+ assert(ThisAI.isInAlloca() && "this is passed directly or inalloca");
+ llvm::Value *ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl);
+ llvm::Type *ThisType =
+ cast<llvm::PointerType>(ThisAddr->getType())->getElementType();
+ if (ThisType != AdjustedThisPtr->getType())
+ AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
+ Builder.CreateStore(AdjustedThisPtr, ThisAddr);
+ }
+
+ // Emit the musttail call manually. Even if the prologue pushed cleanups, we
+ // don't actually want to run them.
+ llvm::CallInst *Call = Builder.CreateCall(Callee, Args);
+ Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
+
+ // Apply the standard set of call attributes.
+ unsigned CallingConv;
+ CodeGen::AttributeListType AttributeList;
+ CGM.ConstructAttributeList(*CurFnInfo, MD, AttributeList, CallingConv,
+ /*AttrOnCallSite=*/true);
+ llvm::AttributeSet Attrs =
+ llvm::AttributeSet::get(getLLVMContext(), AttributeList);
+ Call->setAttributes(Attrs);
+ Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+
+ if (Call->getType()->isVoidTy())
+ Builder.CreateRetVoid();
+ else
+ Builder.CreateRet(Call);
+
+ // Finish the function to maintain CodeGenFunction invariants.
+ // FIXME: Don't emit unreachable code.
+ EmitBlock(createBasicBlock());
+ FinishFunction();
+}
+
void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
GlobalDecl GD, const ThunkInfo &Thunk) {
@@ -306,7 +373,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
// Make the call and return the result.
- EmitCallAndReturnForThunk(GD, Callee, &Thunk);
+ EmitCallAndReturnForThunk(Callee, &Thunk);
// Set the right linkage.
CGM.setFunctionLinkage(GD, Fn);
@@ -612,7 +679,8 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
// We're at the end of the translation unit, so the current key
// function is fully correct.
- if (const CXXMethodDecl *keyFunction = Context.getCurrentKeyFunction(RD)) {
+ const CXXMethodDecl *keyFunction = Context.getCurrentKeyFunction(RD);
+ if (keyFunction && !RD->hasAttr<DLLImportAttr>()) {
// If this class has a key function, use that to determine the
// linkage of the vtable.
const FunctionDecl *def = nullptr;
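
The new EmitMustTailThunk path above covers thunks whose prototypes use the inalloca convention. A sketch of C++ that should reach it, assuming a 32-bit MSVC target (illustrative, not taken from the patch):

   // Illustrative only. On i686-pc-windows-msvc the by-value 'Arg' parameter
   // is passed inalloca (non-trivial copy constructor), and C::f overriding
   // the non-primary base B::f needs a this-adjusting thunk, so the thunk is
   // emitted as a musttail forwarding call instead of going through CGCall.
   struct Arg { Arg(); Arg(const Arg &); int x; };
   struct A { virtual void g(); };
   struct B { virtual void f(Arg); };
   struct C : A, B { void f(Arg); };   // thunk adjusts 'this' from B-in-C to C
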
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index 69cf079567e3..e0195a22eb1e 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGVTABLE_H
-#define CLANG_CODEGEN_CGVTABLE_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGVTABLES_H
+#define LLVM_CLANG_LIB_CODEGEN_CGVTABLES_H
#include "clang/AST/BaseSubobject.h"
#include "clang/AST/CharUnits.h"
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 956f3247bd82..82cd9496e00c 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGVALUE_H
-#define CLANG_CODEGEN_CGVALUE_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
+#define LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 00a87d87a290..10c2409f6bff 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -16,6 +16,15 @@ set(LLVM_LINK_COMPONENTS
TransformUtils
)
+# In a standard Clang+LLVM build, we need to generate intrinsics before
+# building codegen. In a standalone build, LLVM is already built and we don't
+# need this dependency. Furthermore, LLVM doesn't export it so we can't have
+# this dependency.
+set(codegen_deps intrinsics_gen)
+if (CLANG_BUILT_STANDALONE)
+ set(codegen_deps)
+endif()
+
add_clang_library(clangCodeGen
BackendUtil.cpp
CGAtomic.cpp
@@ -57,17 +66,19 @@ add_clang_library(clangCodeGen
CodeGenPGO.cpp
CodeGenTBAA.cpp
CodeGenTypes.cpp
+ CoverageMappingGen.cpp
ItaniumCXXABI.cpp
MicrosoftCXXABI.cpp
ModuleBuilder.cpp
- SanitizerBlacklist.cpp
+ SanitizerMetadata.cpp
TargetInfo.cpp
DEPENDS
- intrinsics_gen
+ ${codegen_deps}
LINK_LIBS
clangAST
clangBasic
clangFrontend
+ clangLex
)
diff --git a/lib/CodeGen/CodeGenABITypes.cpp b/lib/CodeGen/CodeGenABITypes.cpp
index 180cd51940ac..12189ae1aea9 100644
--- a/lib/CodeGen/CodeGenABITypes.cpp
+++ b/lib/CodeGen/CodeGenABITypes.cpp
@@ -26,9 +26,11 @@ using namespace CodeGen;
CodeGenABITypes::CodeGenABITypes(ASTContext &C,
llvm::Module &M,
- const llvm::DataLayout &TD)
+ const llvm::DataLayout &TD,
+ CoverageSourceInfo *CoverageInfo)
: CGO(new CodeGenOptions),
- CGM(new CodeGen::CodeGenModule(C, *CGO, M, TD, C.getDiagnostics())) {
+ CGM(new CodeGen::CodeGenModule(C, *CGO, M, TD, C.getDiagnostics(),
+ CoverageInfo)) {
}
CodeGenABITypes::~CodeGenABITypes()
@@ -65,5 +67,6 @@ CodeGenABITypes::arrangeFreeFunctionCall(CanQualType returnType,
FunctionType::ExtInfo info,
RequiredArgs args) {
return CGM->getTypes().arrangeLLVMFunctionInfo(
- returnType, /*IsInstanceMethod=*/false, argTypes, info, args);
+ returnType, /*IsInstanceMethod=*/false, /*IsChainCall=*/false, argTypes,
+ info, args);
}
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 04d2cd9d53e4..a6f6fdef335c 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -7,18 +7,20 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/CodeGen/CodeGenAction.h"
+#include "CoverageMappingGen.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/DeclGroup.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclGroup.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/BackendUtil.h"
+#include "clang/CodeGen/CodeGenAction.h"
#include "clang/CodeGen/ModuleBuilder.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/DebugInfo.h"
@@ -59,16 +61,18 @@ namespace clang {
const TargetOptions &targetopts,
const LangOptions &langopts, bool TimePasses,
const std::string &infile, llvm::Module *LinkModule,
- raw_ostream *OS, LLVMContext &C)
+ raw_ostream *OS, LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo = nullptr)
: Diags(_Diags), Action(action), CodeGenOpts(compopts),
TargetOpts(targetopts), LangOpts(langopts), AsmOutStream(OS),
- Context(), LLVMIRGeneration("LLVM IR Generation Time"),
- Gen(CreateLLVMCodeGen(Diags, infile, compopts, targetopts, C)),
+ Context(nullptr), LLVMIRGeneration("LLVM IR Generation Time"),
+ Gen(CreateLLVMCodeGen(Diags, infile, compopts,
+ targetopts, C, CoverageInfo)),
LinkModule(LinkModule) {
llvm::TimePassesIsEnabled = TimePasses;
}
- llvm::Module *takeModule() { return TheModule.release(); }
+ std::unique_ptr<llvm::Module> takeModule() { return std::move(TheModule); }
llvm::Module *takeLinkModule() { return LinkModule.release(); }
void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override {
@@ -149,13 +153,10 @@ namespace clang {
// Link LinkModule into this module if present, preserving its validity.
if (LinkModule) {
- std::string ErrorMsg;
- if (Linker::LinkModules(M, LinkModule.get(), Linker::PreserveSource,
- &ErrorMsg)) {
- Diags.Report(diag::err_fe_cannot_link_module)
- << LinkModule->getModuleIdentifier() << ErrorMsg;
+ if (Linker::LinkModules(
+ M, LinkModule.get(),
+ [=](const DiagnosticInfo &DI) { linkerDiagnosticHandler(DI); }))
return;
- }
}
// Install an inline asm handler so that diagnostics get printed through
@@ -218,6 +219,8 @@ namespace clang {
((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
}
+ void linkerDiagnosticHandler(const llvm::DiagnosticInfo &DI);
+
static void DiagnosticHandler(const llvm::DiagnosticInfo &DI,
void *Context) {
((BackendConsumer *)Context)->DiagnosticHandlerImpl(DI);
@@ -269,11 +272,11 @@ static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D,
// Create the copy and transfer ownership to clang::SourceManager.
// TODO: Avoid copying files into memory.
- llvm::MemoryBuffer *CBuf =
- llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(),
- LBuf->getBufferIdentifier());
+ std::unique_ptr<llvm::MemoryBuffer> CBuf =
+ llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(),
+ LBuf->getBufferIdentifier());
// FIXME: Keep a file ID map instead of creating new IDs for each location.
- FileID FID = CSM.createFileID(CBuf);
+ FileID FID = CSM.createFileID(std::move(CBuf));
// Translate the offset into the file.
unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
@@ -493,6 +496,21 @@ void BackendConsumer::OptimizationFailureHandler(
EmitOptimizationMessage(D, diag::warn_fe_backend_optimization_failure);
}
+void BackendConsumer::linkerDiagnosticHandler(const DiagnosticInfo &DI) {
+ if (DI.getSeverity() != DS_Error)
+ return;
+
+ std::string MsgStorage;
+ {
+ raw_string_ostream Stream(MsgStorage);
+ DiagnosticPrinterRawOStream DP(Stream);
+ DI.print(DP);
+ }
+
+ Diags.Report(diag::err_fe_cannot_link_module)
+ << LinkModule->getModuleIdentifier() << MsgStorage;
+}
+
/// \brief This function is invoked when the backend needs
/// to report something to the user.
void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
@@ -572,10 +590,12 @@ void CodeGenAction::EndSourceFileAction() {
BEConsumer->takeLinkModule();
// Steal the module from the consumer.
- TheModule.reset(BEConsumer->takeModule());
+ TheModule = BEConsumer->takeModule();
}
-llvm::Module *CodeGenAction::takeModule() { return TheModule.release(); }
+std::unique_ptr<llvm::Module> CodeGenAction::takeModule() {
+ return std::move(TheModule);
+}
llvm::LLVMContext *CodeGenAction::takeLLVMContext() {
OwnsVMContext = false;
@@ -603,8 +623,8 @@ static raw_ostream *GetOutputStream(CompilerInstance &CI,
llvm_unreachable("Invalid action!");
}
-ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
BackendAction BA = static_cast<BackendAction>(Act);
std::unique_ptr<raw_ostream> OS(GetOutputStream(CI, InFile, BA));
if (BA != Backend_EmitNothing && !OS)
@@ -616,18 +636,15 @@ ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
// loaded from bitcode, do so now.
const std::string &LinkBCFile = CI.getCodeGenOpts().LinkBitcodeFile;
if (!LinkModuleToUse && !LinkBCFile.empty()) {
- std::string ErrorStr;
-
- llvm::MemoryBuffer *BCBuf =
- CI.getFileManager().getBufferForFile(LinkBCFile, &ErrorStr);
+ auto BCBuf = CI.getFileManager().getBufferForFile(LinkBCFile);
if (!BCBuf) {
CI.getDiagnostics().Report(diag::err_cannot_open_file)
- << LinkBCFile << ErrorStr;
+ << LinkBCFile << BCBuf.getError().message();
return nullptr;
}
ErrorOr<llvm::Module *> ModuleOrErr =
- getLazyBitcodeModule(BCBuf, *VMContext);
+ getLazyBitcodeModule(std::move(*BCBuf), *VMContext);
if (std::error_code EC = ModuleOrErr.getError()) {
CI.getDiagnostics().Report(diag::err_cannot_open_file)
<< LinkBCFile << EC.message();
@@ -636,11 +653,19 @@ ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
LinkModuleToUse = ModuleOrErr.get();
}
- BEConsumer = new BackendConsumer(BA, CI.getDiagnostics(), CI.getCodeGenOpts(),
- CI.getTargetOpts(), CI.getLangOpts(),
- CI.getFrontendOpts().ShowTimers, InFile,
- LinkModuleToUse, OS.release(), *VMContext);
- return BEConsumer;
+ CoverageSourceInfo *CoverageInfo = nullptr;
+ // Add the preprocessor callback only when the coverage mapping is generated.
+ if (CI.getCodeGenOpts().CoverageMapping) {
+ CoverageInfo = new CoverageSourceInfo;
+ CI.getPreprocessor().addPPCallbacks(
+ std::unique_ptr<PPCallbacks>(CoverageInfo));
+ }
+ std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
+ BA, CI.getDiagnostics(), CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, InFile,
+ LinkModuleToUse, OS.release(), *VMContext, CoverageInfo));
+ BEConsumer = Result.get();
+ return std::move(Result);
}
void CodeGenAction::ExecuteAction() {
@@ -660,7 +685,7 @@ void CodeGenAction::ExecuteAction() {
return;
llvm::SMDiagnostic Err;
- TheModule.reset(ParseIR(MainFile, Err, *VMContext));
+ TheModule = parseIR(MainFile->getMemBufferRef(), Err, *VMContext);
if (!TheModule) {
// Translate from the diagnostic info to the SourceManager location if
// available.
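
With takeModule() now returning std::unique_ptr<llvm::Module>, callers own the module explicitly. A minimal caller sketch (assumed usage, not part of the patch; 'Compiler' stands for an already-configured clang::CompilerInstance, includes omitted):

   llvm::LLVMContext Ctx;
   clang::EmitLLVMOnlyAction Action(&Ctx);
   if (Compiler.ExecuteAction(Action)) {
     // Ownership transfers here; no more raw release()/reset() pairing.
     std::unique_ptr<llvm::Module> M = Action.takeModule();
     // ... hand M to a JIT or backend pass pipeline.
   }
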
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 5ca3a78bb4fa..826171a46e23 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -37,17 +37,18 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
Builder(cgm.getModule().getContext(), llvm::ConstantFolder(),
CGBuilderInserterTy(this)),
- CapturedStmtInfo(nullptr), SanOpts(&CGM.getLangOpts().Sanitize),
- IsSanitizerScope(false), AutoreleaseResult(false), BlockInfo(nullptr),
- BlockPointer(nullptr), LambdaThisCaptureField(nullptr),
- NormalCleanupDest(nullptr), NextCleanupDestIndex(1),
- FirstBlockInfo(nullptr), EHResumeBlock(nullptr), ExceptionSlot(nullptr),
- EHSelectorSlot(nullptr), DebugInfo(CGM.getModuleDebugInfo()),
- DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
- PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
- CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
- NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
- CXXABIThisValue(nullptr), CXXThisValue(nullptr),
+ CurFn(nullptr), CapturedStmtInfo(nullptr),
+ SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false),
+ CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false),
+ BlockInfo(nullptr), BlockPointer(nullptr),
+ LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr),
+ NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr),
+ ExceptionSlot(nullptr), EHSelectorSlot(nullptr),
+ DebugInfo(CGM.getModuleDebugInfo()), DisableDebugInfo(false),
+ DidCallStackSave(false), IndirectBranch(nullptr), PGO(cgm),
+ SwitchInsn(nullptr), SwitchWeights(nullptr), CaseRangeBlock(nullptr),
+ UnreachableBlock(nullptr), NumReturnExprs(0), NumSimpleReturnExprs(0),
+ CXXABIThisDecl(nullptr), CXXABIThisValue(nullptr), CXXThisValue(nullptr),
CXXDefaultInitExprThis(nullptr), CXXStructorImplicitParamDecl(nullptr),
CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
@@ -62,6 +63,12 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
FMF.setNoNaNs();
FMF.setNoInfs();
}
+ if (CGM.getCodeGenOpts().NoNaNsFPMath) {
+ FMF.setNoNaNs();
+ }
+ if (CGM.getCodeGenOpts().NoSignedZeros) {
+ FMF.setNoSignedZeros();
+ }
Builder.SetFastMathFlags(FMF);
}
@@ -79,6 +86,17 @@ CodeGenFunction::~CodeGenFunction() {
}
}
+LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+ CharUnits Alignment;
+ if (CGM.getCXXABI().isTypeInfoCalculable(T)) {
+ Alignment = getContext().getTypeAlignInChars(T);
+ unsigned MaxAlign = getContext().getLangOpts().MaxTypeAlign;
+ if (MaxAlign && Alignment.getQuantity() > MaxAlign &&
+ !getContext().isAlignmentRequired(T))
+ Alignment = CharUnits::fromQuantity(MaxAlign);
+ }
+ return LValue::MakeAddr(V, T, Alignment, getContext(), CGM.getTBAAInfo(T));
+}
llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
@@ -140,7 +158,7 @@ TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
}
}
-void CodeGenFunction::EmitReturnBlock() {
+llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
// For cleanliness, we try to avoid emitting the return block for
// simple cases.
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
@@ -155,7 +173,7 @@ void CodeGenFunction::EmitReturnBlock() {
delete ReturnBlock.getBlock();
} else
EmitBlock(ReturnBlock.getBlock());
- return;
+ return llvm::DebugLoc();
}
// Otherwise, if the return block is the target of a single direct
@@ -166,15 +184,13 @@ void CodeGenFunction::EmitReturnBlock() {
dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
if (BI && BI->isUnconditional() &&
BI->getSuccessor(0) == ReturnBlock.getBlock()) {
- // Reset insertion point, including debug location, and delete the
- // branch. This is really subtle and only works because the next change
- // in location will hit the caching in CGDebugInfo::EmitLocation and not
- // override this.
- Builder.SetCurrentDebugLocation(BI->getDebugLoc());
+ // Record/return the DebugLoc of the simple 'return' expression to be used
+ // later by the actual 'ret' instruction.
+ llvm::DebugLoc Loc = BI->getDebugLoc();
Builder.SetInsertPoint(BI->getParent());
BI->eraseFromParent();
delete ReturnBlock.getBlock();
- return;
+ return Loc;
}
}
@@ -183,6 +199,7 @@ void CodeGenFunction::EmitReturnBlock() {
// region.end for now.
EmitBlock(ReturnBlock.getBlock());
+ return llvm::DebugLoc();
}
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
@@ -236,16 +253,18 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
}
// Emit function epilog (to return).
- EmitReturnBlock();
+ llvm::DebugLoc Loc = EmitReturnBlock();
if (ShouldInstrumentFunction())
EmitFunctionInstrumentation("__cyg_profile_func_exit");
// Emit debug descriptor for function end.
- if (CGDebugInfo *DI = getDebugInfo()) {
+ if (CGDebugInfo *DI = getDebugInfo())
DI->EmitFunctionEnd(Builder);
- }
+ // Reset the debug location to that of the simple 'return' expression, if any
+ // rather than that of the end of the function's scope '}'.
+ ApplyDebugLocation AL(*this, Loc);
EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
EmitEndEHSpec(CurCodeDecl);
@@ -337,9 +356,9 @@ void CodeGenFunction::EmitMCountInstrumentation() {
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
- CodeGenModule &CGM,llvm::LLVMContext &Context,
- SmallVector <llvm::Value*, 5> &kernelMDArgs,
- CGBuilderTy& Builder, ASTContext &ASTCtx) {
+ CodeGenModule &CGM, llvm::LLVMContext &Context,
+ SmallVector<llvm::Metadata *, 5> &kernelMDArgs,
+ CGBuilderTy &Builder, ASTContext &ASTCtx) {
// Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list of the form "key" followed by N values, where N is
  // the number of kernel arguments.
@@ -347,23 +366,28 @@ static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();
// MDNode for the kernel argument address space qualifiers.
- SmallVector<llvm::Value*, 8> addressQuals;
+ SmallVector<llvm::Metadata *, 8> addressQuals;
addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));
// MDNode for the kernel argument access qualifiers (images only).
- SmallVector<llvm::Value*, 8> accessQuals;
+ SmallVector<llvm::Metadata *, 8> accessQuals;
accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));
// MDNode for the kernel argument type names.
- SmallVector<llvm::Value*, 8> argTypeNames;
+ SmallVector<llvm::Metadata *, 8> argTypeNames;
argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));
+ // MDNode for the kernel argument base type names.
+ SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
+ argBaseTypeNames.push_back(
+ llvm::MDString::get(Context, "kernel_arg_base_type"));
+
// MDNode for the kernel argument type qualifiers.
- SmallVector<llvm::Value*, 8> argTypeQuals;
+ SmallVector<llvm::Metadata *, 8> argTypeQuals;
argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));
// MDNode for the kernel argument names.
- SmallVector<llvm::Value*, 8> argNames;
+ SmallVector<llvm::Metadata *, 8> argNames;
argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));
for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
@@ -375,8 +399,8 @@ static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
QualType pointeeTy = ty->getPointeeType();
// Get address qualifier.
- addressQuals.push_back(Builder.getInt32(ASTCtx.getTargetAddressSpace(
- pointeeTy.getAddressSpace())));
+ addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
+ ASTCtx.getTargetAddressSpace(pointeeTy.getAddressSpace()))));
// Get argument type name.
std::string typeName =
@@ -384,11 +408,23 @@ static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
// Turn "unsigned type" to "utype"
std::string::size_type pos = typeName.find("unsigned");
- if (pos != std::string::npos)
+ if (pointeeTy.isCanonical() && pos != std::string::npos)
typeName.erase(pos+1, 8);
argTypeNames.push_back(llvm::MDString::get(Context, typeName));
+ std::string baseTypeName =
+ pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
+ Policy) +
+ "*";
+
+ // Turn "unsigned type" to "utype"
+ pos = baseTypeName.find("unsigned");
+ if (pos != std::string::npos)
+ baseTypeName.erase(pos+1, 8);
+
+ argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));
+
// Get argument type qualifiers:
if (ty.isRestrictQualified())
typeQuals = "restrict";
@@ -403,18 +439,29 @@ static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
AddrSpc =
CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
- addressQuals.push_back(Builder.getInt32(AddrSpc));
+ addressQuals.push_back(
+ llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));
// Get argument type name.
std::string typeName = ty.getUnqualifiedType().getAsString(Policy);
// Turn "unsigned type" to "utype"
std::string::size_type pos = typeName.find("unsigned");
- if (pos != std::string::npos)
+ if (ty.isCanonical() && pos != std::string::npos)
typeName.erase(pos+1, 8);
argTypeNames.push_back(llvm::MDString::get(Context, typeName));
+ std::string baseTypeName =
+ ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);
+
+ // Turn "unsigned type" to "utype"
+ pos = baseTypeName.find("unsigned");
+ if (pos != std::string::npos)
+ baseTypeName.erase(pos+1, 8);
+
+ argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));
+
// Get argument type qualifiers:
if (ty.isConstQualified())
typeQuals = "const";
@@ -442,8 +489,10 @@ static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, argBaseTypeNames));
kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
- kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
+ if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
}
void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
@@ -454,12 +503,11 @@ void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
llvm::LLVMContext &Context = getLLVMContext();
- SmallVector <llvm::Value*, 5> kernelMDArgs;
- kernelMDArgs.push_back(Fn);
+ SmallVector<llvm::Metadata *, 5> kernelMDArgs;
+ kernelMDArgs.push_back(llvm::ConstantAsMetadata::get(Fn));
- if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
- GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs,
- Builder, getContext());
+ GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs, Builder,
+ getContext());
if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
QualType hintQTy = A->getTypeHint();
@@ -467,33 +515,31 @@ void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
bool isSignedInteger =
hintQTy->isSignedIntegerType() ||
(hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
- llvm::Value *attrMDArgs[] = {
- llvm::MDString::get(Context, "vec_type_hint"),
- llvm::UndefValue::get(CGM.getTypes().ConvertType(A->getTypeHint())),
- llvm::ConstantInt::get(
- llvm::IntegerType::get(Context, 32),
- llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0)))
- };
+ llvm::Metadata *attrMDArgs[] = {
+ llvm::MDString::get(Context, "vec_type_hint"),
+ llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
+ CGM.getTypes().ConvertType(A->getTypeHint()))),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::IntegerType::get(Context, 32),
+ llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0))))};
kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
}
if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
- llvm::Value *attrMDArgs[] = {
- llvm::MDString::get(Context, "work_group_size_hint"),
- Builder.getInt32(A->getXDim()),
- Builder.getInt32(A->getYDim()),
- Builder.getInt32(A->getZDim())
- };
+ llvm::Metadata *attrMDArgs[] = {
+ llvm::MDString::get(Context, "work_group_size_hint"),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
}
if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
- llvm::Value *attrMDArgs[] = {
- llvm::MDString::get(Context, "reqd_work_group_size"),
- Builder.getInt32(A->getXDim()),
- Builder.getInt32(A->getYDim()),
- Builder.getInt32(A->getZDim())
- };
+ llvm::Metadata *attrMDArgs[] = {
+ llvm::MDString::get(Context, "reqd_work_group_size"),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
}
@@ -526,6 +572,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
const FunctionArgList &Args,
SourceLocation Loc,
SourceLocation StartLoc) {
+ assert(!CurFn &&
+ "Do not use a CodeGenFunction object for more than one function");
+
const Decl *D = GD.getDecl();
DidCallStackSave = false;
@@ -536,8 +585,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
CurFnInfo = &FnInfo;
assert(CurFn->isDeclaration() && "Function already has body?");
- if (CGM.getSanitizerBlacklist().isIn(*Fn))
- SanOpts = &SanitizerOptions::Disabled;
+ if (CGM.isInSanitizerBlacklist(Fn, Loc))
+ SanOpts.clear();
// Pass inline keyword to optimizer if it appears explicitly on any
// declaration. Also, in the case of -fno-inline attach NoInline
@@ -560,17 +609,17 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
}
// If we are checking function types, emit a function type signature as
- // prefix data.
- if (getLangOpts().CPlusPlus && SanOpts->Function) {
+ // prologue data.
+ if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (llvm::Constant *PrefixSig =
+ if (llvm::Constant *PrologueSig =
CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
llvm::Constant *FTRTTIConst =
CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
- llvm::Constant *PrefixStructElems[] = { PrefixSig, FTRTTIConst };
- llvm::Constant *PrefixStructConst =
- llvm::ConstantStruct::getAnon(PrefixStructElems, /*Packed=*/true);
- Fn->setPrefixData(PrefixStructConst);
+ llvm::Constant *PrologueStructElems[] = { PrologueSig, FTRTTIConst };
+ llvm::Constant *PrologueStructConst =
+ llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
+ Fn->setPrologueData(PrologueStructConst);
}
}
}
@@ -663,6 +712,14 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
CXXThisValue = EmitLoadOfLValue(ThisLValue,
SourceLocation()).getScalarVal();
}
+ for (auto *FD : MD->getParent()->fields()) {
+ if (FD->hasCapturedVLAType()) {
+ auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
+ SourceLocation()).getScalarVal();
+ auto VAT = FD->getCapturedVLAType();
+ VLASizeMap[VAT->getSizeExpr()] = ExprArg;
+ }
+ }
} else {
// Not in a lambda; just use 'this' from the method.
// FIXME: Should we generate a new load for each use of 'this'? The
@@ -771,11 +828,12 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
if (MD && MD->isInstance()) {
if (CGM.getCXXABI().HasThisReturn(GD))
ResTy = MD->getThisType(getContext());
+ else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
+ ResTy = CGM.getContext().VoidPtrTy;
CGM.getCXXABI().buildThisParam(*this, Args);
}
-
- for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
- Args.push_back(FD->getParamDecl(i));
+
+ Args.append(FD->param_begin(), FD->param_end());
if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
@@ -801,6 +859,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
// Generate the body of the function.
+ PGO.checkGlobalDecl(GD);
PGO.assignRegionCounters(GD.getDecl(), CurFn);
if (isa<CXXDestructorDecl>(FD))
EmitDestructorBody(Args);
@@ -842,13 +901,14 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// C11 6.9.1p12:
// If the '}' that terminates a function is reached, and the value of the
// function call is used by the caller, the behavior is undefined.
- if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() &&
+ if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
!FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
- if (SanOpts->Return) {
+ if (SanOpts.has(SanitizerKind::Return)) {
SanitizerScope SanScope(this);
- EmitCheck(Builder.getFalse(), "missing_return",
- EmitCheckSourceLocation(FD->getLocation()),
- ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
+ llvm::Value *IsFalse = Builder.getFalse();
+ EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
+ "missing_return", EmitCheckSourceLocation(FD->getLocation()),
+ None);
} else if (CGM.getCodeGenOpts().OptimizationLevel == 0)
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap));
Builder.CreateUnreachable();
@@ -862,9 +922,6 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// a quick pass now to see if we can.
if (!CurFn->doesNotThrow())
TryMarkNoThrow(CurFn);
-
- PGO.emitInstrumentationData();
- PGO.destroyRegionCounters();
}
/// ContainsLabel - Return true if the statement contains a label in it. If
@@ -1499,7 +1556,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
// If the size is an expression that is not an integer constant
// expression [...] each time it is evaluated it shall have a value
// greater than zero.
- if (SanOpts->VLABound &&
+ if (SanOpts.has(SanitizerKind::VLABound) &&
size->getType()->isSignedIntegerType()) {
SanitizerScope SanScope(this);
llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
@@ -1507,9 +1564,9 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
EmitCheckSourceLocation(size->getLocStart()),
EmitCheckTypeDescriptor(size->getType())
};
- EmitCheck(Builder.CreateICmpSGT(Size, Zero),
- "vla_bound_not_positive", StaticArgs, Size,
- CRK_Recoverable);
+ EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
+ SanitizerKind::VLABound),
+ "vla_bound_not_positive", StaticArgs, Size);
}
// Always zexting here would be wrong if it weren't
@@ -1655,11 +1712,8 @@ void CodeGenFunction::InsertHelper(llvm::Instruction *I,
llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const {
LoopStack.InsertHelper(I);
- if (IsSanitizerScope) {
- I->setMetadata(
- CGM.getModule().getMDKindID("nosanitize"),
- llvm::MDNode::get(CGM.getLLVMContext(), ArrayRef<llvm::Value *>()));
- }
+ if (IsSanitizerScope)
+ CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}
template <bool PreserveNames>
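
The prefix-data to prologue-data switch above affects -fsanitize=function, which attaches an RTTI-based signature to each C++ function so indirect calls can be type-checked at run time. A sketch of code the check is meant to catch (illustrative, not from the patch):

   // Compile with -fsanitize=function. Calling through a wrong-typed function
   // pointer is undefined behavior; the signature stored as prologue data lets
   // the runtime report the mismatch instead of silently miscalling.
   void callee(int) {}
   int main() {
     void (*fp)(float) = reinterpret_cast<void (*)(float)>(&callee);
     fp(1.0f);               // mismatched signature -> 'function' sanitizer report
     return 0;
   }
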
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 59cc30da42c5..3a990d21490d 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
-#define CLANG_CODEGEN_CODEGENFUNCTION_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
#include "CGBuilder.h"
#include "CGDebugInfo.h"
@@ -93,19 +93,6 @@ enum TypeEvaluationKind {
TEK_Aggregate
};
-class SuppressDebugLocation {
- llvm::DebugLoc CurLoc;
- llvm::IRBuilderBase &Builder;
-public:
- SuppressDebugLocation(llvm::IRBuilderBase &Builder)
- : CurLoc(Builder.getCurrentDebugLocation()), Builder(Builder) {
- Builder.SetCurrentDebugLocation(llvm::DebugLoc());
- }
- ~SuppressDebugLocation() {
- Builder.SetCurrentDebugLocation(CurLoc);
- }
-};
-
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public CodeGenTypeCache {
@@ -182,6 +169,8 @@ public:
/// \brief API for captured statement code generation.
class CGCapturedStmtInfo {
public:
+ explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
+ : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
explicit CGCapturedStmtInfo(const CapturedStmt &S,
CapturedRegionKind K = CR_Default)
: Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
@@ -193,7 +182,7 @@ public:
I != E; ++I, ++Field) {
if (I->capturesThis())
CXXThisFieldDecl = *Field;
- else
+ else if (I->capturesVariable())
CaptureFields[I->getCapturedVar()] = *Field;
}
}
@@ -214,6 +203,10 @@ public:
bool isCXXThisExprCaptured() const { return CXXThisFieldDecl != nullptr; }
FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
+ static bool classof(const CGCapturedStmtInfo *) {
+ return true;
+ }
+
/// \brief Emit the captured statement body.
virtual void EmitBody(CodeGenFunction &CGF, Stmt *S) {
RegionCounter Cnt = CGF.getPGORegionCounter(S);
@@ -244,8 +237,8 @@ public:
/// potentially higher performance penalties.
unsigned char BoundsChecking;
- /// \brief Sanitizer options to use for this function.
- const SanitizerOptions *SanOpts;
+ /// \brief Sanitizers enabled for this function.
+ SanitizerSet SanOpts;
/// \brief True if CodeGen currently emits code implementing sanitizer checks.
bool IsSanitizerScope;
@@ -258,9 +251,17 @@ public:
~SanitizerScope();
};
+ /// In C++, whether we are code generating a thunk. This controls whether we
+ /// should emit cleanups.
+ bool CurFuncIsThunk;
+
/// In ARC, whether we should autorelease the return value.
bool AutoreleaseResult;
+ /// Whether we processed a Microsoft-style asm block during CodeGen. These can
+ /// potentially set the return value.
+ bool SawAsmBlock;
+
const CodeGen::CGBlockInfo *BlockInfo;
llvm::Value *BlockPointer;
@@ -277,11 +278,11 @@ public:
/// Header for data within LifetimeExtendedCleanupStack.
struct LifetimeExtendedCleanupHeader {
/// The size of the following cleanup object.
- size_t Size : 29;
+ unsigned Size : 29;
/// The kind of cleanup to push: a value from the CleanupKind enumeration.
unsigned Kind : 3;
- size_t getSize() const { return Size; }
+ size_t getSize() const { return size_t(Size); }
CleanupKind getKind() const { return static_cast<CleanupKind>(Kind); }
};
@@ -531,7 +532,7 @@ public:
}
};
- class LexicalScope: protected RunCleanupsScope {
+ class LexicalScope : public RunCleanupsScope {
SourceRange Range;
SmallVector<const LabelDecl*, 4> Labels;
LexicalScope *ParentScope;
@@ -577,6 +578,67 @@ public:
void rescopeLabels();
};
+ /// \brief The scope used to remap some variables as private in the OpenMP
+ /// loop body (or other captured region emitted without outlining), and to
+ /// restore old vars back on exit.
+ class OMPPrivateScope : public RunCleanupsScope {
+ typedef llvm::DenseMap<const VarDecl *, llvm::Value *> VarDeclMapTy;
+ VarDeclMapTy SavedLocals;
+ VarDeclMapTy SavedPrivates;
+
+ private:
+ OMPPrivateScope(const OMPPrivateScope &) LLVM_DELETED_FUNCTION;
+ void operator=(const OMPPrivateScope &) LLVM_DELETED_FUNCTION;
+
+ public:
+ /// \brief Enter a new OpenMP private scope.
+ explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
+
+    /// \brief Registers the \a LocalVD variable as private and applies the \a
+    /// PrivateGen function to it to generate the corresponding private variable.
+ /// \a PrivateGen returns an address of the generated private variable.
+ /// \return true if the variable is registered as private, false if it has
+ /// been privatized already.
+ bool
+ addPrivate(const VarDecl *LocalVD,
+ const std::function<llvm::Value *()> &PrivateGen) {
+ assert(PerformCleanup && "adding private to dead scope");
+ if (SavedLocals.count(LocalVD) > 0) return false;
+ SavedLocals[LocalVD] = CGF.LocalDeclMap.lookup(LocalVD);
+ CGF.LocalDeclMap.erase(LocalVD);
+ SavedPrivates[LocalVD] = PrivateGen();
+ CGF.LocalDeclMap[LocalVD] = SavedLocals[LocalVD];
+ return true;
+ }
+
+ /// \brief Privatizes local variables previously registered as private.
+    /// Registration is separate from the actual privatization so that
+    /// initializers can use the values of the original variables, not the
+    /// private copies.
+ /// This is important, for example, if the private variable is a class
+ /// variable initialized by a constructor that references other private
+ /// variables. But at initialization original variables must be used, not
+ /// private copies.
+ /// \return true if at least one variable was privatized, false otherwise.
+ bool Privatize() {
+ for (auto VDPair : SavedPrivates) {
+ CGF.LocalDeclMap[VDPair.first] = VDPair.second;
+ }
+ SavedPrivates.clear();
+ return !SavedLocals.empty();
+ }
+
+ void ForceCleanup() {
+ RunCleanupsScope::ForceCleanup();
+ // Remap vars back to the original values.
+ for (auto I : SavedLocals) {
+ CGF.LocalDeclMap[I.first] = I.second;
+ }
+ SavedLocals.clear();
+ }
+
+ /// \brief Exit scope - all the mapped variables are restored.
+ ~OMPPrivateScope() { ForceCleanup(); }
+ };
/// \brief Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added.
@@ -1062,6 +1124,9 @@ public:
void pushLifetimeExtendedDestroy(CleanupKind kind, llvm::Value *addr,
QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
+ void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
+ llvm::Value *CompletePtr,
+ QualType ElementType);
void pushStackRestore(CleanupKind kind, llvm::Value *SPMem);
void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
@@ -1101,9 +1166,7 @@ public:
void GenerateObjCMethod(const ObjCMethodDecl *OMD);
- void StartObjCMethod(const ObjCMethodDecl *MD,
- const ObjCContainerDecl *CD,
- SourceLocation StartLoc);
+ void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);
/// GenerateObjCGetter - Synthesize an Objective-C property getter function.
void GenerateObjCGetter(ObjCImplementationDecl *IMP,
@@ -1193,10 +1256,11 @@ public:
void EmitLambdaBlockInvokeBody();
void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
+ void EmitAsanPrologueOrEpilogue(bool Prologue);
/// EmitReturnBlock - Emit the unified return block, trying to avoid its
/// emission when possible.
- void EmitReturnBlock();
+ llvm::DebugLoc EmitReturnBlock();
/// FinishFunction - Complete IR generation of the current function. It is
/// legal to call this function even if there is no current insertion point.
@@ -1204,8 +1268,11 @@ public:
void StartThunk(llvm::Function *Fn, GlobalDecl GD, const CGFunctionInfo &FnInfo);
- void EmitCallAndReturnForThunk(GlobalDecl GD, llvm::Value *Callee,
- const ThunkInfo *Thunk);
+ void EmitCallAndReturnForThunk(llvm::Value *Callee, const ThunkInfo *Thunk);
+
+ /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
+ void EmitMustTailThunk(const CXXMethodDecl *MD, llvm::Value *AdjustedThisPtr,
+ llvm::Value *Callee);
/// GenerateThunk - Generate a thunk for the given method.
void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
@@ -1390,13 +1457,7 @@ public:
CGM.getTBAAInfo(T));
}
- LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
- CharUnits Alignment;
- if (!T->isIncompleteType())
- Alignment = getContext().getTypeAlignInChars(T);
- return LValue::MakeAddr(V, T, Alignment, getContext(),
- CGM.getTBAAInfo(T));
- }
+ LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
/// block. The caller is responsible for setting an appropriate alignment on
@@ -1468,8 +1529,8 @@ public:
/// EmitExprAsInit - Emits the code necessary to initialize a
/// location in memory with the given initializer.
- void EmitExprAsInit(const Expr *init, const ValueDecl *D,
- LValue lvalue, bool capturedByInit);
+ void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
+ bool capturedByInit);
/// hasVolatileMember - returns true if aggregate type has a volatile
/// member.
@@ -1610,7 +1671,7 @@ public:
const CXXRecordDecl *Derived,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd,
- bool NullCheckValue);
+ bool NullCheckValue, SourceLocation Loc);
llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
const CXXRecordDecl *Derived,
@@ -1637,27 +1698,22 @@ public:
const FunctionArgList &Args);
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating,
- llvm::Value *This,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd);
-
+ llvm::Value *This, const CXXConstructExpr *E);
+
void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
llvm::Value *This, llvm::Value *Src,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd);
+ const CXXConstructExpr *E);
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
const ConstantArrayType *ArrayTy,
llvm::Value *ArrayPtr,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
+ const CXXConstructExpr *E,
bool ZeroInitialization = false);
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
llvm::Value *NumElements,
llvm::Value *ArrayPtr,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
+ const CXXConstructExpr *E,
bool ZeroInitialization = false);
static Destroyer destroyCXXObject;
@@ -1710,7 +1766,13 @@ public:
TCK_DowncastPointer,
/// Checking the operand of a static_cast to a derived reference type. Must
/// be an object within its lifetime.
- TCK_DowncastReference
+ TCK_DowncastReference,
+ /// Checking the operand of a cast to a base object. Must be suitably sized
+ /// and aligned.
+ TCK_Upcast,
+ /// Checking the operand of a cast to a virtual base object. Must be an
+ /// object within its lifetime.
+ TCK_UpcastToVirtualBase
};
/// \brief Whether any type-checking sanitizers are enabled. If \c false,
@@ -1720,7 +1782,8 @@ public:
/// \brief Emit a check that \p V is the address of storage of the
/// appropriate size and alignment for an object of type \p Type.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
- QualType Type, CharUnits Alignment = CharUnits::Zero());
+ QualType Type, CharUnits Alignment = CharUnits::Zero(),
+ bool SkipNullCheck = false);
/// \brief Emit a check that \p Base points into an array object, which
/// we can access at index \p Index. \p Accessed should be \c false if we
@@ -1732,6 +1795,13 @@ public:
bool isInc, bool isPre);
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
+
+ void EmitAlignmentAssumption(llvm::Value *PtrValue, unsigned Alignment,
+ llvm::Value *OffsetValue = nullptr) {
+ Builder.CreateAlignmentAssumption(CGM.getDataLayout(), PtrValue, Alignment,
+ OffsetValue);
+ }
+
//===--------------------------------------------------------------------===//
// Declaration Emission
//===--------------------------------------------------------------------===//
@@ -1746,13 +1816,17 @@ public:
/// This function can be called with a null (unreachable) insert point.
void EmitVarDecl(const VarDecl &D);
- void EmitScalarInit(const Expr *init, const ValueDecl *D,
- LValue lvalue, bool capturedByInit);
+ void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
+ bool capturedByInit);
void EmitScalarInit(llvm::Value *init, LValue lvalue);
typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
llvm::Value *Address);
+ /// \brief Determine whether the given initializer is trivial in the sense
+ /// that it requires no code to be generated.
+ bool isTrivialInitializer(const Expr *Init);
+
/// EmitAutoVarDecl - Emit an auto variable declaration.
///
/// This function can be called with a null (unreachable) insert point.
@@ -1886,12 +1960,12 @@ public:
void EmitIfStmt(const IfStmt &S);
void EmitCondBrHints(llvm::LLVMContext &Context, llvm::BranchInst *CondBr,
- const ArrayRef<const Attr *> &Attrs);
+ ArrayRef<const Attr *> Attrs);
void EmitWhileStmt(const WhileStmt &S,
- const ArrayRef<const Attr *> &Attrs = None);
- void EmitDoStmt(const DoStmt &S, const ArrayRef<const Attr *> &Attrs = None);
+ ArrayRef<const Attr *> Attrs = None);
+ void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
void EmitForStmt(const ForStmt &S,
- const ArrayRef<const Attr *> &Attrs = None);
+ ArrayRef<const Attr *> Attrs = None);
void EmitReturnStmt(const ReturnStmt &S);
void EmitDeclStmt(const DeclStmt &S);
void EmitBreakStmt(const BreakStmt &S);
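The statement-emission hunk above also switches from `const ArrayRef<const Attr *> &` to plain `ArrayRef<const Attr *>`: an ArrayRef is just a pointer/length view, so the LLVM convention is to pass it by value. A small illustrative sketch (function name invented):

    // Sketch only: ArrayRef is cheap to copy, so take it by value.
    static void inspectLoopHints(llvm::ArrayRef<const clang::Attr *> Attrs) {
      for (const clang::Attr *A : Attrs)
        (void)A;  // examine loop hint attributes here
    }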
@@ -1915,27 +1989,55 @@ public:
void EmitSEHTryStmt(const SEHTryStmt &S);
void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
- const ArrayRef<const Attr *> &Attrs = None);
+ ArrayRef<const Attr *> Attrs = None);
+ LValue InitCapturedStruct(const CapturedStmt &S);
llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
+ void GenerateCapturedStmtFunctionProlog(const CapturedStmt &S);
+ llvm::Function *GenerateCapturedStmtFunctionEpilog(const CapturedStmt &S);
llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
llvm::Value *GenerateCapturedStmtArgument(const CapturedStmt &S);
+ void EmitOMPAggregateAssign(LValue OriginalAddr, llvm::Value *PrivateAddr,
+ const Expr *AssignExpr, QualType Type,
+ const VarDecl *VDInit);
+ void EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
+ OMPPrivateScope &PrivateScope);
+ void EmitOMPPrivateClause(const OMPExecutableDirective &D,
+ OMPPrivateScope &PrivateScope);
void EmitOMPParallelDirective(const OMPParallelDirective &S);
void EmitOMPSimdDirective(const OMPSimdDirective &S);
void EmitOMPForDirective(const OMPForDirective &S);
+ void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
void EmitOMPSectionDirective(const OMPSectionDirective &S);
void EmitOMPSingleDirective(const OMPSingleDirective &S);
void EmitOMPMasterDirective(const OMPMasterDirective &S);
void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
+ void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
void EmitOMPTaskDirective(const OMPTaskDirective &S);
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
void EmitOMPFlushDirective(const OMPFlushDirective &S);
+ void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
+ void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
+ void EmitOMPTargetDirective(const OMPTargetDirective &S);
+ void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
+
+private:
+
+ /// Helpers for the OpenMP loop directives.
+ void EmitOMPLoopBody(const OMPLoopDirective &Directive,
+ bool SeparateIter = false);
+ void EmitOMPInnerLoop(const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
+ bool SeparateIter = false);
+ void EmitOMPSimdFinal(const OMPLoopDirective &S);
+ void EmitOMPWorksharingLoop(const OMPLoopDirective &S);
+
+public:
//===--------------------------------------------------------------------===//
// LValue Expression Emission
@@ -1988,6 +2090,12 @@ public:
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
+ std::pair<RValue, RValue> EmitAtomicCompareExchange(
+ LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
+ llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
+ llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
+ bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
+
/// EmitToMemory - Change a scalar value from its value
/// representation to its in-memory representation.
llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
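The new EmitAtomicCompareExchange mirrors LLVM's cmpxchg instruction: it returns the previously stored value together with a boolean success flag, and both orderings default to sequentially consistent. A usage sketch (wrapper name invented; headers and usings omitted):

    // Sketch only: strong seq_cst compare-exchange on an atomic l-value.
    static std::pair<RValue, RValue>
    emitSeqCstCAS(CodeGenFunction &CGF, LValue Obj, RValue Expected,
                  RValue Desired, SourceLocation Loc) {
      // first = old value, second = boolean success result
      return CGF.EmitAtomicCompareExchange(Obj, Expected, Desired, Loc);
    }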
@@ -2039,7 +2147,7 @@ public:
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
- void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
@@ -2082,6 +2190,8 @@ public:
LValue EmitCastLValue(const CastExpr *E);
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
+
+ llvm::Value *EmitExtVectorElementLValue(LValue V);
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
@@ -2166,12 +2276,10 @@ public:
const Decl *TargetDecl = nullptr,
llvm::Instruction **callOrInvoke = nullptr);
- RValue EmitCall(QualType FnType, llvm::Value *Callee,
- SourceLocation CallLoc,
+ RValue EmitCall(QualType FnType, llvm::Value *Callee, const CallExpr *E,
ReturnValueSlot ReturnValue,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
- const Decl *TargetDecl = nullptr);
+ const Decl *TargetDecl = nullptr,
+ llvm::Value *Chain = nullptr);
RValue EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue = ReturnValueSlot());
@@ -2207,23 +2315,28 @@ public:
CXXDtorType Type,
const CXXRecordDecl *RD);
- RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
- SourceLocation CallLoc,
- llvm::Value *Callee,
- ReturnValueSlot ReturnValue,
- llvm::Value *This,
- llvm::Value *ImplicitParam,
- QualType ImplicitParamTy,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd);
+ RValue
+ EmitCXXMemberOrOperatorCall(const CXXMethodDecl *MD, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue, llvm::Value *This,
+ llvm::Value *ImplicitParam,
+ QualType ImplicitParamTy, const CallExpr *E);
+ RValue EmitCXXStructorCall(const CXXMethodDecl *MD, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue, llvm::Value *This,
+ llvm::Value *ImplicitParam,
+ QualType ImplicitParamTy, const CallExpr *E,
+ StructorType Type);
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
ReturnValueSlot ReturnValue);
+ RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue,
+ bool HasQualifier,
+ NestedNameSpecifier *Qualifier,
+ bool IsArrow, const Expr *Base);
+ // Compute the object pointer.
RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
ReturnValueSlot ReturnValue);
- llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
- const CXXMethodDecl *MD,
- llvm::Value *This);
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
ReturnValueSlot ReturnValue);
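EmitCXXMemberCall is split here into EmitCXXMemberOrOperatorCall (ordinary methods and overloaded operators) and EmitCXXStructorCall (constructors and destructors, which carry a StructorType); both take the originating CallExpr instead of raw argument iterators. A minimal sketch for the common case with no ABI implicit argument (helper name invented; headers and usings omitted):

    // Sketch only: a plain member call with no ABI-specific implicit argument.
    static RValue emitPlainMemberCall(CodeGenFunction &CGF,
                                      const CXXMethodDecl *MD,
                                      llvm::Value *Callee, llvm::Value *This,
                                      const CallExpr *E) {
      return CGF.EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValueSlot(),
                                             This, /*ImplicitParam=*/nullptr,
                                             /*ImplicitParamTy=*/QualType(), E);
    }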
@@ -2233,7 +2346,8 @@ public:
RValue EmitBuiltinExpr(const FunctionDecl *FD,
- unsigned BuiltinID, const CallExpr *E);
+ unsigned BuiltinID, const CallExpr *E,
+ ReturnValueSlot ReturnValue);
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
@@ -2413,12 +2527,6 @@ public:
/// EmitLoadOfComplex - Load a complex number from the specified l-value.
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
- /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
- /// a static local variable.
- llvm::Constant *CreateStaticVarDecl(const VarDecl &D,
- const char *Separator,
- llvm::GlobalValue::LinkageTypes Linkage);
-
/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
@@ -2433,6 +2541,9 @@ public:
void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
bool PerformInit);
+ llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::Constant *Dtor,
+ llvm::Constant *Addr);
+
/// Call atexit() with a function that passes the given argument to
/// the given function.
void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::Constant *fn,
@@ -2449,7 +2560,7 @@ public:
/// GenerateCXXGlobalInitFunc - Generates code for initializing global
/// variables.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
- ArrayRef<llvm::Constant *> Decls,
+ ArrayRef<llvm::Function *> CXXThreadLocals,
llvm::GlobalVariable *Guard = nullptr);
/// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
@@ -2541,23 +2652,12 @@ public:
/// passing to a runtime sanitizer handler.
llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
- /// \brief Specify under what conditions this check can be recovered
- enum CheckRecoverableKind {
- /// Always terminate program execution if this check fails
- CRK_Unrecoverable,
- /// Check supports recovering, allows user to specify which
- CRK_Recoverable,
- /// Runtime conditionally aborts, always need to support recovery.
- CRK_AlwaysRecoverable
- };
-
/// \brief Create a basic block that will call a handler function in a
/// sanitizer runtime with the provided arguments, and create a conditional
/// branch to it.
- void EmitCheck(llvm::Value *Checked, StringRef CheckName,
- ArrayRef<llvm::Constant *> StaticArgs,
- ArrayRef<llvm::Value *> DynamicArgs,
- CheckRecoverableKind Recoverable);
+ void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerKind>> Checked,
+ StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
+ ArrayRef<llvm::Value *> DynamicArgs);
/// \brief Create a basic block that will call the trap intrinsic, and emit a
/// conditional branch to it, for the -ftrapv checks.
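EmitCheck drops the explicit CheckRecoverableKind: each condition is now paired with the SanitizerKind that guards it, so recoverability is derived per sanitizer kind inside EmitCheck rather than passed in by the caller, and several checks that share one runtime handler can be folded into a single call. A hedged call-site sketch (argument values are placeholders; headers and usings omitted):

    // Sketch only: one handler call guarded by the 'null' sanitizer.
    static void emitNonNullCheck(CodeGenFunction &CGF, llvm::Value *IsNonNull,
                                 llvm::ArrayRef<llvm::Constant *> StaticArgs,
                                 llvm::ArrayRef<llvm::Value *> DynamicArgs) {
      CGF.EmitCheck(std::make_pair(IsNonNull, SanitizerKind::Null),
                    "type_mismatch", StaticArgs, DynamicArgs);
    }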
@@ -2589,18 +2689,15 @@ private:
/// from function arguments into \arg Dst. See ABIArgInfo::Expand.
///
/// \param AI - The first function argument of the expansion.
- /// \return The argument following the last expanded function
- /// argument.
- llvm::Function::arg_iterator
- ExpandTypeFromArgs(QualType Ty, LValue Dst,
- llvm::Function::arg_iterator AI);
-
- /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
- /// Ty, into individual arguments on the provided vector \arg Args. See
- /// ABIArgInfo::Expand.
- void ExpandTypeToArgs(QualType Ty, RValue Src,
- SmallVectorImpl<llvm::Value *> &Args,
- llvm::FunctionType *IRFuncTy);
+ void ExpandTypeFromArgs(QualType Ty, LValue Dst,
+ SmallVectorImpl<llvm::Argument *>::iterator &AI);
+
+ /// ExpandTypeToArgs - Expand an RValue \arg RV, with the LLVM type for \arg
+ /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
+ /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
+ void ExpandTypeToArgs(QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
+ SmallVectorImpl<llvm::Value *> &IRCallArgs,
+ unsigned &IRCallArgPos);
llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr, std::string &ConstraintStr);
@@ -2616,76 +2713,53 @@ public:
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
- bool ForceColumnInfo = false) {
- if (CallArgTypeInfo) {
- EmitCallArgs(Args, CallArgTypeInfo->isVariadic(),
- CallArgTypeInfo->param_type_begin(),
- CallArgTypeInfo->param_type_end(), ArgBeg, ArgEnd,
- ForceColumnInfo);
- } else {
- // T::param_type_iterator might not have a default ctor.
- const QualType *NoIter = nullptr;
- EmitCallArgs(Args, /*AllowExtraArguments=*/true, NoIter, NoIter, ArgBeg,
- ArgEnd, ForceColumnInfo);
- }
- }
-
- template<typename ArgTypeIterator>
- void EmitCallArgs(CallArgList& Args,
- bool AllowExtraArguments,
- ArgTypeIterator ArgTypeBeg,
- ArgTypeIterator ArgTypeEnd,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
- bool ForceColumnInfo = false) {
+ const FunctionDecl *CalleeDecl = nullptr,
+ unsigned ParamsToSkip = 0, bool ForceColumnInfo = false) {
SmallVector<QualType, 16> ArgTypes;
CallExpr::const_arg_iterator Arg = ArgBeg;
- // First, use the argument types that the type info knows about
- for (ArgTypeIterator I = ArgTypeBeg, E = ArgTypeEnd; I != E; ++I, ++Arg) {
- assert(Arg != ArgEnd && "Running over edge of argument list!");
-#ifndef NDEBUG
- QualType ArgType = *I;
- QualType ActualArgType = Arg->getType();
- if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
- QualType ActualBaseType =
- ActualArgType->getAs<PointerType>()->getPointeeType();
- QualType ArgBaseType =
- ArgType->getAs<PointerType>()->getPointeeType();
- if (ArgBaseType->isVariableArrayType()) {
- if (const VariableArrayType *VAT =
- getContext().getAsVariableArrayType(ActualBaseType)) {
- if (!VAT->getSizeExpr())
- ActualArgType = ArgType;
- }
- }
+ assert((ParamsToSkip == 0 || CallArgTypeInfo) &&
+ "Can't skip parameters if type info is not provided");
+ if (CallArgTypeInfo) {
+ // First, use the argument types that the type info knows about
+ for (auto I = CallArgTypeInfo->param_type_begin() + ParamsToSkip,
+ E = CallArgTypeInfo->param_type_end();
+ I != E; ++I, ++Arg) {
+ assert(Arg != ArgEnd && "Running over edge of argument list!");
+ assert(
+ ((*I)->isVariablyModifiedType() ||
+ getContext()
+ .getCanonicalType((*I).getNonReferenceType())
+ .getTypePtr() ==
+ getContext().getCanonicalType(Arg->getType()).getTypePtr()) &&
+ "type mismatch in call argument!");
+ ArgTypes.push_back(*I);
}
- assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
- getTypePtr() ==
- getContext().getCanonicalType(ActualArgType).getTypePtr() &&
- "type mismatch in call argument!");
-#endif
- ArgTypes.push_back(*I);
}
// Either we've emitted all the call args, or we have a call to a variadic
- // function or some other call that allows extra arguments.
- assert((Arg == ArgEnd || AllowExtraArguments) &&
- "Extra arguments in non-variadic function!");
+ // function.
+ assert(
+ (Arg == ArgEnd || !CallArgTypeInfo || CallArgTypeInfo->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
// If we still have any arguments, emit them using the type of the argument.
for (; Arg != ArgEnd; ++Arg)
- ArgTypes.push_back(Arg->getType());
+ ArgTypes.push_back(getVarArgType(*Arg));
- EmitCallArgs(Args, ArgTypes, ArgBeg, ArgEnd, ForceColumnInfo);
+ EmitCallArgs(Args, ArgTypes, ArgBeg, ArgEnd, CalleeDecl, ParamsToSkip,
+ ForceColumnInfo);
}
void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
- bool ForceColumnInfo = false);
+ const FunctionDecl *CalleeDecl = nullptr,
+ unsigned ParamsToSkip = 0, bool ForceColumnInfo = false);
private:
+ QualType getVarArgType(const Expr *Arg);
+
const TargetCodeGenInfo &getTargetHooks() const {
return CGM.getTargetCodeGenInfo();
}
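The rewritten EmitCallArgs takes an optional callee declaration and a ParamsToSkip count, so callers that have already emitted some leading arguments can skip the matching prototype parameters; trailing arguments beyond the prototype now go through getVarArgType, and the old #ifndef NDEBUG pointer/VLA special case collapses into a single assert. A hedged sketch of the skipping case (hypothetical scenario; headers and usings omitted):

    // Sketch only: emit a call's arguments, skipping the first prototype
    // parameter and its argument because they were emitted separately.
    static void emitArgsSkippingFirst(CodeGenFunction &CGF, CallArgList &Args,
                                      const FunctionProtoType *FPT,
                                      const CallExpr *E) {
      CGF.EmitCallArgs(Args, FPT, E->arg_begin() + 1, E->arg_end(),
                       /*CalleeDecl=*/nullptr, /*ParamsToSkip=*/1);
    }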
@@ -2701,6 +2775,8 @@ private:
/// GetPointeeAlignment - Given an expression with a pointer type, emit the
/// value and compute our best estimate of the alignment of the pointee.
std::pair<llvm::Value*, unsigned> EmitPointerWithAlignment(const Expr *Addr);
+
+ llvm::Value *GetValueForARMHint(unsigned BuiltinID);
};
/// Helper class with most of the code for saving a value for a
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 48823befcc07..8981bfe89cb4 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -22,6 +22,7 @@
#include "CodeGenFunction.h"
#include "CodeGenPGO.h"
#include "CodeGenTBAA.h"
+#include "CoverageMappingGen.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
@@ -74,7 +75,8 @@ static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
llvm::Module &M, const llvm::DataLayout &TD,
- DiagnosticsEngine &diags)
+ DiagnosticsEngine &diags,
+ CoverageSourceInfo *CoverageInfo)
: Context(C), LangOpts(C.getLangOpts()), CodeGenOpts(CGO), TheModule(M),
Diags(diags), TheDataLayout(TD), Target(C.getTargetInfo()),
ABI(createCXXABI(*this)), VMContext(M.getContext()), TBAA(nullptr),
@@ -87,8 +89,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
NSConcreteStackBlock(nullptr), BlockObjectAssign(nullptr),
BlockObjectDispose(nullptr), BlockDescriptorType(nullptr),
GenericBlockLiteralType(nullptr), LifetimeStartFn(nullptr),
- LifetimeEndFn(nullptr), SanitizerBL(llvm::SpecialCaseList::createOrDie(
- CGO.SanitizerBlacklistFile)) {
+ LifetimeEndFn(nullptr), SanitizerMD(new SanitizerMetadata(*this)) {
// Initialize the type cache.
llvm::LLVMContext &LLVMContext = M.getContext();
@@ -108,6 +109,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
+ BuiltinCC = getTargetCodeGenInfo().getABIInfo().getBuiltinCC();
if (LangOpts.ObjC1)
createObjCRuntime();
@@ -119,7 +121,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
createCUDARuntime();
// Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
- if (LangOpts.Sanitize.Thread ||
+ if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
(!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
TBAA = new CodeGenTBAA(Context, VMContext, CodeGenOpts, getLangOpts(),
getCXXABI().getMangleContext());
@@ -145,6 +147,11 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
getDiags().Report(DiagID) << EC.message();
}
}
+
+ // If coverage mapping generation is enabled, create the
+ // CoverageMappingModuleGen object.
+ if (CodeGenOpts.CoverageMapping)
+ CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));
}
CodeGenModule::~CodeGenModule() {
@@ -190,6 +197,10 @@ void CodeGenModule::createCUDARuntime() {
CUDARuntime = CreateNVCUDARuntime(*this);
}
+void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
+ Replacements[Name] = C;
+}
+
void CodeGenModule::applyReplacements() {
for (ReplacementsTy::iterator I = Replacements.begin(),
E = Replacements.end();
@@ -235,7 +246,7 @@ static const llvm::GlobalObject *getAliasedGlobal(const llvm::GlobalAlias &GA) {
auto *GA2 = dyn_cast<llvm::GlobalAlias>(C);
if (!GA2)
return nullptr;
- if (!Visited.insert(GA2))
+ if (!Visited.insert(GA2).second)
return nullptr;
C = GA2->getAliasee();
}
@@ -334,15 +345,15 @@ void CodeGenModule::Release() {
if (ObjCRuntime)
if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
AddGlobalCtor(ObjCInitFunction);
- if (getCodeGenOpts().ProfileInstrGenerate)
- if (llvm::Function *PGOInit = CodeGenPGO::emitInitialization(*this))
- AddGlobalCtor(PGOInit, 0);
if (PGOReader && PGOStats.hasDiagnostics())
PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
EmitCtorList(GlobalCtors, "llvm.global_ctors");
EmitCtorList(GlobalDtors, "llvm.global_dtors");
EmitGlobalAnnotations();
EmitStaticExternCAliases();
+ EmitDeferredUnusedCoverageMappings();
+ if (CoverageMapping)
+ CoverageMapping->emit();
emitLLVMUsed();
if (CodeGenOpts.Autolink &&
@@ -378,6 +389,18 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
}
+ if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
+ llvm::PICLevel::Level PL = llvm::PICLevel::Default;
+ switch (PLevel) {
+ case 0: break;
+ case 1: PL = llvm::PICLevel::Small; break;
+ case 2: PL = llvm::PICLevel::Large; break;
+ default: llvm_unreachable("Invalid PIC Level");
+ }
+
+ getModule().setPICLevel(PL);
+ }
+
SimplifyPersonality();
if (getCodeGenOpts().EmitDeclMetadata)
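The PIC hunk above records the front end's -fpic/-fPIC level on the module: level 1 maps to PICLevel::Small, level 2 to PICLevel::Large, and the backend picks it up from module metadata. A tiny sketch of the underlying llvm::Module call (headers omitted):

    // Sketch only: what Release() now does for -fPIC (PIC level 2).
    static void tagLargePIC(llvm::Module &M) {
      M.setPICLevel(llvm::PICLevel::Large);  // stored as the "PIC Level" flag
    }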
@@ -510,11 +533,10 @@ static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(
llvm_unreachable("Invalid TLS model!");
}
-void CodeGenModule::setTLSMode(llvm::GlobalVariable *GV,
- const VarDecl &D) const {
+void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
- llvm::GlobalVariable::ThreadLocalMode TLM;
+ llvm::GlobalValue::ThreadLocalMode TLM;
TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel());
// Override the TLS model if it is explicitly specified.
@@ -548,9 +570,9 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
Str = II->getName();
}
- auto &Mangled = Manglings.GetOrCreateValue(Str);
- Mangled.second = GD;
- return FoundStr = Mangled.first();
+ // Keep the first result in the case of a mangling collision.
+ auto Result = Manglings.insert(std::make_pair(Str, GD));
+ return FoundStr = Result.first->first();
}
StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
@@ -570,9 +592,8 @@ StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
else
MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
- auto &Mangled = Manglings.GetOrCreateValue(Out.str());
- Mangled.second = BD;
- return Mangled.first();
+ auto Result = Manglings.insert(std::make_pair(Out.str(), BD));
+ return Result.first->first();
}
llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
@@ -601,7 +622,7 @@ void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
// Get the type of a ctor entry, { i32, void ()*, i8* }.
llvm::StructType *CtorStructTy = llvm::StructType::get(
- Int32Ty, llvm::PointerType::getUnqual(CtorFTy), VoidPtrTy, NULL);
+ Int32Ty, llvm::PointerType::getUnqual(CtorFTy), VoidPtrTy, nullptr);
// Construct the constructor and destructor arrays.
SmallVector<llvm::Constant*, 8> Ctors;
@@ -692,10 +713,6 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
// Naked implies noinline: we should not be inlining such functions.
B.addAttribute(llvm::Attribute::Naked);
B.addAttribute(llvm::Attribute::NoInline);
- } else if (D->hasAttr<OptimizeNoneAttr>()) {
- // OptimizeNone implies noinline; we should not be inlining such functions.
- B.addAttribute(llvm::Attribute::OptimizeNone);
- B.addAttribute(llvm::Attribute::NoInline);
} else if (D->hasAttr<NoDuplicateAttr>()) {
B.addAttribute(llvm::Attribute::NoDuplicate);
} else if (D->hasAttr<NoInlineAttr>()) {
@@ -708,19 +725,14 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
}
if (D->hasAttr<ColdAttr>()) {
- B.addAttribute(llvm::Attribute::OptimizeForSize);
+ if (!D->hasAttr<OptimizeNoneAttr>())
+ B.addAttribute(llvm::Attribute::OptimizeForSize);
B.addAttribute(llvm::Attribute::Cold);
}
if (D->hasAttr<MinSizeAttr>())
B.addAttribute(llvm::Attribute::MinSize);
- if (D->hasAttr<OptimizeNoneAttr>()) {
- // OptimizeNone wins over OptimizeForSize and MinSize.
- B.removeAttribute(llvm::Attribute::OptimizeForSize);
- B.removeAttribute(llvm::Attribute::MinSize);
- }
-
if (LangOpts.getStackProtector() == LangOptions::SSPOn)
B.addAttribute(llvm::Attribute::StackProtect);
else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
@@ -729,16 +741,19 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
B.addAttribute(llvm::Attribute::StackProtectReq);
// Add sanitizer attributes if function is not blacklisted.
- if (!SanitizerBL.isIn(*F)) {
+ if (!isInSanitizerBlacklist(F, D->getLocation())) {
// When AddressSanitizer is enabled, set SanitizeAddress attribute
// unless __attribute__((no_sanitize_address)) is used.
- if (LangOpts.Sanitize.Address && !D->hasAttr<NoSanitizeAddressAttr>())
+ if (LangOpts.Sanitize.has(SanitizerKind::Address) &&
+ !D->hasAttr<NoSanitizeAddressAttr>())
B.addAttribute(llvm::Attribute::SanitizeAddress);
// Same for ThreadSanitizer and __attribute__((no_sanitize_thread))
- if (LangOpts.Sanitize.Thread && !D->hasAttr<NoSanitizeThreadAttr>())
+ if (LangOpts.Sanitize.has(SanitizerKind::Thread) &&
+ !D->hasAttr<NoSanitizeThreadAttr>())
B.addAttribute(llvm::Attribute::SanitizeThread);
// Same for MemorySanitizer and __attribute__((no_sanitize_memory))
- if (LangOpts.Sanitize.Memory && !D->hasAttr<NoSanitizeMemoryAttr>())
+ if (LangOpts.Sanitize.has(SanitizerKind::Memory) &&
+ !D->hasAttr<NoSanitizeMemoryAttr>())
B.addAttribute(llvm::Attribute::SanitizeMemory);
}
@@ -746,6 +761,24 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::AttributeSet::get(
F->getContext(), llvm::AttributeSet::FunctionIndex, B));
+ if (D->hasAttr<OptimizeNoneAttr>()) {
+ // OptimizeNone implies noinline; we should not be inlining such functions.
+ F->addFnAttr(llvm::Attribute::OptimizeNone);
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // OptimizeNone wins over OptimizeForSize, MinSize, AlwaysInline.
+ assert(!F->hasFnAttribute(llvm::Attribute::OptimizeForSize) &&
+ "OptimizeNone and OptimizeForSize on same function!");
+ assert(!F->hasFnAttribute(llvm::Attribute::MinSize) &&
+ "OptimizeNone and MinSize on same function!");
+ assert(!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
+ "OptimizeNone and AlwaysInline on same function!");
+
+ // Attribute 'inlinehint' has no effect on 'optnone' functions.
+ // Explicitly remove it from the set of function attributes.
+ F->removeFnAttr(llvm::Attribute::InlineHint);
+ }
+
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
F->setUnnamedAddr(true);
else if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
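The optnone handling moves after the AttrBuilder block so it can be applied directly to the function, assert that no conflicting optimization attributes slipped through, and strip inlinehint. At the source level this corresponds to a declaration like the following sketch, which compiles as ordinary C++ with Clang:

    // Sketch only: emitted with 'optnone noinline' and without 'inlinehint',
    // even though the function is declared inline.
    __attribute__((optnone)) inline int keepDebuggable(int X) {
      return X * 2;
    }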
@@ -772,6 +805,16 @@ void CodeGenModule::SetCommonAttributes(const Decl *D,
addUsedGlobal(GV);
}
+void CodeGenModule::setAliasAttributes(const Decl *D,
+ llvm::GlobalValue *GV) {
+ SetCommonAttributes(D, GV);
+
+ // Process the dllexport attribute based on whether the original definition
+ // (not necessarily the aliasee) was exported.
+ if (D->hasAttr<DLLExportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+}
+
void CodeGenModule::setNonAliasAttributes(const Decl *D,
llvm::GlobalObject *GO) {
SetCommonAttributes(D, GO);
@@ -818,9 +861,9 @@ static void setLinkageAndVisibilityForGV(llvm::GlobalValue *GV,
}
}
-void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
- llvm::Function *F,
- bool IsIncompleteFunction) {
+void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
+ bool IsIncompleteFunction,
+ bool IsThunk) {
if (unsigned IID = F->getIntrinsicID()) {
// If this is an intrinsic function, set the function's attributes
// to the intrinsic's attributes.
@@ -837,7 +880,7 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
// Add the Returned attribute for "this", except for iOS 5 and earlier
// where substantial code, including the libstdc++ dylib, was compiled with
// GCC and does not actually return "this".
- if (getCXXABI().HasThisReturn(GD) &&
+ if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
!(getTarget().getTriple().isiOS() &&
getTarget().getTriple().isOSVersionLT(6))) {
assert(!F->arg_empty() &&
@@ -913,38 +956,37 @@ void CodeGenModule::emitLLVMUsed() {
}
void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
- llvm::Value *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
+ auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
llvm::SmallString<32> Opt;
getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
- llvm::Value *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
+ auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
void CodeGenModule::AddDependentLib(StringRef Lib) {
llvm::SmallString<24> Opt;
getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
- llvm::Value *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
+ auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
/// \brief Add link options implied by the given module, including modules
/// it depends on, using a postorder walk.
-static void addLinkOptionsPostorder(CodeGenModule &CGM,
- Module *Mod,
- SmallVectorImpl<llvm::Value *> &Metadata,
+static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
+ SmallVectorImpl<llvm::Metadata *> &Metadata,
llvm::SmallPtrSet<Module *, 16> &Visited) {
// Import this module's parent.
- if (Mod->Parent && Visited.insert(Mod->Parent)) {
+ if (Mod->Parent && Visited.insert(Mod->Parent).second) {
addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
}
// Import this module's dependencies.
for (unsigned I = Mod->Imports.size(); I > 0; --I) {
- if (Visited.insert(Mod->Imports[I-1]))
+ if (Visited.insert(Mod->Imports[I - 1]).second)
addLinkOptionsPostorder(CGM, Mod->Imports[I-1], Metadata, Visited);
}
@@ -955,10 +997,9 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM,
// Link against a framework. Frameworks are currently Darwin only, so we
// don't need to ask TargetCodeGenInfo for the spelling of the linker option.
if (Mod->LinkLibraries[I-1].IsFramework) {
- llvm::Value *Args[2] = {
- llvm::MDString::get(Context, "-framework"),
- llvm::MDString::get(Context, Mod->LinkLibraries[I-1].Library)
- };
+ llvm::Metadata *Args[2] = {
+ llvm::MDString::get(Context, "-framework"),
+ llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library)};
Metadata.push_back(llvm::MDNode::get(Context, Args));
continue;
@@ -968,7 +1009,7 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM,
llvm::SmallString<24> Opt;
CGM.getTargetCodeGenInfo().getDependentLibraryOption(
Mod->LinkLibraries[I-1].Library, Opt);
- llvm::Value *OptString = llvm::MDString::get(Context, Opt);
+ auto *OptString = llvm::MDString::get(Context, Opt);
Metadata.push_back(llvm::MDNode::get(Context, OptString));
}
}
@@ -985,7 +1026,7 @@ void CodeGenModule::EmitModuleLinkOptions() {
for (llvm::SetVector<clang::Module *>::iterator M = ImportedModules.begin(),
MEnd = ImportedModules.end();
M != MEnd; ++M) {
- if (Visited.insert(*M))
+ if (Visited.insert(*M).second)
Stack.push_back(*M);
}
@@ -1005,7 +1046,7 @@ void CodeGenModule::EmitModuleLinkOptions() {
if ((*Sub)->IsExplicit)
continue;
- if (Visited.insert(*Sub)) {
+ if (Visited.insert(*Sub).second) {
Stack.push_back(*Sub);
AnyChildren = true;
}
@@ -1021,12 +1062,12 @@ void CodeGenModule::EmitModuleLinkOptions() {
// Add link options for all of the imported modules in reverse topological
// order. We don't do anything to try to order import link flags with respect
// to linker options inserted by things like #pragma comment().
- SmallVector<llvm::Value *, 16> MetadataArgs;
+ SmallVector<llvm::Metadata *, 16> MetadataArgs;
Visited.clear();
for (llvm::SetVector<clang::Module *>::iterator M = LinkModules.begin(),
MEnd = LinkModules.end();
M != MEnd; ++M) {
- if (Visited.insert(*M))
+ if (Visited.insert(*M).second)
addLinkOptionsPostorder(*this, *M, MetadataArgs, Visited);
}
std::reverse(MetadataArgs.begin(), MetadataArgs.end());
@@ -1061,14 +1102,18 @@ void CodeGenModule::EmitDeferred() {
llvm::GlobalValue *GV = G.GV;
DeferredDeclsToEmit.pop_back();
- assert(GV == GetGlobalValue(getMangledName(D)));
+ assert(!GV || GV == GetGlobalValue(getMangledName(D)));
+ if (!GV)
+ GV = GetGlobalValue(getMangledName(D));
+
+
// Check to see if we've already emitted this. This is necessary
// for a couple of reasons: first, decls can end up in the
// deferred-decls queue multiple times, and second, decls can end
// up with definitions in unusual ways (e.g. by an extern inline
// function acquiring a strong function redefinition). Just
// ignore these cases.
- if(!GV->isDeclaration())
+ if (GV && !GV->isDeclaration())
continue;
// Otherwise, emit the definition and move on to the next one.
@@ -1147,12 +1192,68 @@ void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation()));
}
-bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
+bool CodeGenModule::isInSanitizerBlacklist(llvm::Function *Fn,
+ SourceLocation Loc) const {
+ const auto &SanitizerBL = getContext().getSanitizerBlacklist();
+ // Blacklist by function name.
+ if (SanitizerBL.isBlacklistedFunction(Fn->getName()))
+ return true;
+ // Blacklist by location.
+ if (!Loc.isInvalid())
+ return SanitizerBL.isBlacklistedLocation(Loc);
+ // If location is unknown, this may be a compiler-generated function. Assume
+ // it's located in the main file.
+ auto &SM = Context.getSourceManager();
+ if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ return SanitizerBL.isBlacklistedFile(MainFile->getName());
+ }
+ return false;
+}
+
+bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV,
+ SourceLocation Loc, QualType Ty,
+ StringRef Category) const {
+ // For now globals can be blacklisted only in ASan.
+ if (!LangOpts.Sanitize.has(SanitizerKind::Address))
+ return false;
+ const auto &SanitizerBL = getContext().getSanitizerBlacklist();
+ if (SanitizerBL.isBlacklistedGlobal(GV->getName(), Category))
+ return true;
+ if (SanitizerBL.isBlacklistedLocation(Loc, Category))
+ return true;
+ // Check global type.
+ if (!Ty.isNull()) {
+ // Drill down the array types: if global variable of a fixed type is
+ // blacklisted, we also don't instrument arrays of them.
+ while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
+ Ty = AT->getElementType();
+ Ty = Ty.getCanonicalType().getUnqualifiedType();
+ // We only allow blacklisting record types (classes, structs, etc.)
+ if (Ty->isRecordType()) {
+ std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
+ if (SanitizerBL.isBlacklistedType(TypeStr, Category))
+ return true;
+ }
+ }
+ return false;
+}
+
+bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
// Never defer when EmitAllDecls is specified.
if (LangOpts.EmitAllDecls)
- return false;
+ return true;
- return !getContext().DeclMustBeEmitted(Global);
+ return getContext().DeclMustBeEmitted(Global);
+}
+
+bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(Global))
+ if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ // Implicit template instantiations may change linkage if they are later
+ // explicitly instantiated, so they should not be emitted eagerly.
+ return false;
+
+ return true;
}
llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
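isInSanitizerBlacklist above queries the blacklist by function name, then source location, then (for compiler-generated code) the main file; the global-variable overload additionally checks the symbol name and record type, with an optional category. The file handed to -fsanitize-blacklist= uses the SpecialCaseList syntax; a made-up example covering those queries:

    # sketch of a -fsanitize-blacklist= file (all entry names are invented)
    fun:crt_*                 # skip instrumentation of matching functions
    src:third_party/*         # skip everything defined in these source files
    global:g_trace_buffer     # don't instrument this global (ASan)
    type:LockFreeQueue        # skip globals of this record type
    global:*=init             # category: only suppress init-order checking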
@@ -1167,7 +1268,7 @@ llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
return GV;
- llvm::Constant *Init = EmitUuidofInitializer(Uuid, E->getType());
+ llvm::Constant *Init = EmitUuidofInitializer(Uuid);
assert(Init && "failed to initialize as constant");
auto *GV = new llvm::GlobalVariable(
@@ -1261,9 +1362,10 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
return;
}
- // Defer code generation when possible if this is a static definition, inline
- // function etc. These we only want to emit if they are used.
- if (!MayDeferGeneration(Global)) {
+ // Defer code generation to first use when possible, e.g. if this is an inline
+ // function. If the global must always be emitted, do it eagerly if possible
+ // to benefit from cache locality.
+ if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
// Emit the definition if it can't be deferred.
EmitGlobalDefinition(GD);
return;
@@ -1276,13 +1378,16 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
CXXGlobalInits.push_back(nullptr);
}
-
- // If the value has already been used, add it directly to the
- // DeferredDeclsToEmit list.
+
StringRef MangledName = getMangledName(GD);
- if (llvm::GlobalValue *GV = GetGlobalValue(MangledName))
+ if (llvm::GlobalValue *GV = GetGlobalValue(MangledName)) {
+ // The value has already been used and should therefore be emitted.
addDeferredDeclToEmit(GV, GD);
- else {
+ } else if (MustBeEmitted(Global)) {
+ // The value must be emitted, but cannot be emitted eagerly.
+ assert(!MayBeEmittedEagerly(Global));
+ addDeferredDeclToEmit(/*GV=*/nullptr, GD);
+ } else {
// Otherwise, remember that we saw a deferred decl with this name. The
// first use of the mangled name will cause it to move into
// DeferredDeclsToEmit.
@@ -1394,9 +1499,9 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
// Make sure to emit the definition(s) before we emit the thunks.
// This is necessary for the generation of certain thunks.
if (const auto *CD = dyn_cast<CXXConstructorDecl>(Method))
- EmitCXXConstructor(CD, GD.getCtorType());
+ ABI->emitCXXStructor(CD, getFromCtorType(GD.getCtorType()));
else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Method))
- EmitCXXDestructor(DD, GD.getDtorType());
+ ABI->emitCXXStructor(DD, getFromDtorType(GD.getDtorType()));
else
EmitGlobalFunctionDefinition(GD, GV);
@@ -1426,7 +1531,7 @@ llvm::Constant *
CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
llvm::Type *Ty,
GlobalDecl GD, bool ForVTable,
- bool DontDefer,
+ bool DontDefer, bool IsThunk,
llvm::AttributeSet ExtraAttrs) {
const Decl *D = GD.getDecl();
@@ -1439,6 +1544,10 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
Entry->setLinkage(llvm::Function::ExternalLinkage);
}
+ // Handle dropped DLL attributes.
+ if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
+ Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+
if (Entry->getType()->getElementType() == Ty)
return Entry;
@@ -1464,7 +1573,7 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
MangledName, &getModule());
assert(F->getName() == MangledName && "name was uniqued!");
if (D)
- SetFunctionAttributes(GD, F, IsIncompleteFunction);
+ SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
if (ExtraAttrs.hasAttributes(llvm::AttributeSet::FunctionIndex)) {
llvm::AttrBuilder B(ExtraAttrs, llvm::AttributeSet::FunctionIndex);
F->addAttributes(llvm::AttributeSet::FunctionIndex,
@@ -1510,26 +1619,18 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
// this will be unnecessary.
//
// We also don't emit a definition for a function if it's going to be an
- // entry
- // in a vtable, unless it's already marked as used.
+ // entry in a vtable, unless it's already marked as used.
} else if (getLangOpts().CPlusPlus && D) {
// Look for a declaration that's lexically in a record.
- const auto *FD = cast<FunctionDecl>(D);
- FD = FD->getMostRecentDecl();
- do {
+ for (const auto *FD = cast<FunctionDecl>(D)->getMostRecentDecl(); FD;
+ FD = FD->getPreviousDecl()) {
if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
- if (FD->isImplicit() && !ForVTable) {
- assert(FD->isUsed() &&
- "Sema didn't mark implicit function as used!");
- addDeferredDeclToEmit(F, GD.getWithDecl(FD));
- break;
- } else if (FD->doesThisDeclarationHaveABody()) {
+ if (FD->doesThisDeclarationHaveABody()) {
addDeferredDeclToEmit(F, GD.getWithDecl(FD));
break;
}
}
- FD = FD->getPreviousDecl();
- } while (FD);
+ }
}
}
@@ -1566,13 +1667,28 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy,
llvm::AttributeSet ExtraAttrs) {
llvm::Constant *C =
GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
- /*DontDefer=*/false, ExtraAttrs);
+ /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs);
if (auto *F = dyn_cast<llvm::Function>(C))
if (F->empty())
F->setCallingConv(getRuntimeCC());
return C;
}
+/// CreateBuiltinFunction - Create a new builtin function with the specified
+/// type and name.
+llvm::Constant *
+CodeGenModule::CreateBuiltinFunction(llvm::FunctionType *FTy,
+ StringRef Name,
+ llvm::AttributeSet ExtraAttrs) {
+ llvm::Constant *C =
+ GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
+ /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs);
+ if (auto *F = dyn_cast<llvm::Function>(C))
+ if (F->empty())
+ F->setCallingConv(getBuiltinCC());
+ return C;
+}
+
/// isTypeConstant - Determine whether an object of this type can be emitted
/// as a constant.
///
@@ -1612,6 +1728,10 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
Entry->setLinkage(llvm::Function::ExternalLinkage);
}
+ // Handle dropped DLL attributes.
+ if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
+ Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+
if (Entry->getType() == Ty)
return Entry;
@@ -1741,7 +1861,7 @@ CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
assert(!D->getInit() && "Cannot emit definite definitions here!");
- if (MayDeferGeneration(D)) {
+ if (!MustBeEmitted(D)) {
// If we have not seen a reference to this variable yet, place it
// into the deferred declarations table to be emitted if needed
// later.
@@ -1808,6 +1928,31 @@ void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
R.first->second = nullptr;
}
+static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
+ if (!CGM.supportsCOMDAT())
+ return false;
+
+ if (D.hasAttr<SelectAnyAttr>())
+ return true;
+
+ GVALinkage Linkage;
+ if (auto *VD = dyn_cast<VarDecl>(&D))
+ Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
+ else
+ Linkage = CGM.getContext().GetGVALinkageForFunction(cast<FunctionDecl>(&D));
+
+ switch (Linkage) {
+ case GVA_Internal:
+ case GVA_AvailableExternally:
+ case GVA_StrongExternal:
+ return false;
+ case GVA_DiscardableODR:
+ case GVA_StrongODR:
+ return true;
+ }
+ llvm_unreachable("No such linkage");
+}
+
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
llvm::Constant *Init = nullptr;
QualType ASTTy = D->getType();
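shouldBeInCOMDAT places ODR-linkage definitions, plus anything marked selectany, into a COMDAT group named after the symbol on object formats that support COMDATs. Source-level examples of the linkages involved (selectany needs -fms-extensions):

    // Sketch only: each of these now gets a COMDAT when the target supports it.
    inline int oddOneOut(int X) { return X | 1; }        // GVA_DiscardableODR
    template <class T> T twice(T X) { return X + X; }
    template int twice<int>(int);                        // GVA_StrongODR
    __declspec(selectany) int PickAny = 42;              // SelectAnyAttr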
@@ -1910,6 +2055,13 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
isTypeConstant(D->getType(), true));
+ // If it is in a read-only section, mark it 'constant'.
+ if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
+ const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
+ if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
+ GV->setConstant(true);
+ }
+
GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
// Set the llvm linkage type as appropriate.
@@ -1920,16 +2072,17 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
// has internal linkage; all accesses should just be calls to the
// Itanium-specified entry point, which has the normal linkage of the
// variable.
- if (const auto *VD = dyn_cast<VarDecl>(D))
- if (!VD->isStaticLocal() && VD->getTLSKind() == VarDecl::TLS_Dynamic &&
- Context.getTargetInfo().getTriple().isMacOSX())
- Linkage = llvm::GlobalValue::InternalLinkage;
+ if (!D->isStaticLocal() && D->getTLSKind() == VarDecl::TLS_Dynamic &&
+ Context.getTargetInfo().getTriple().isMacOSX())
+ Linkage = llvm::GlobalValue::InternalLinkage;
GV->setLinkage(Linkage);
if (D->hasAttr<DLLImportAttr>())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
else if (D->hasAttr<DLLExportAttr>())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
+ else
+ GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
if (Linkage == llvm::GlobalVariable::CommonLinkage)
// common vars aren't constant even if declared const.
@@ -1937,11 +2090,20 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
setNonAliasAttributes(D, GV);
+ if (D->getTLSKind() && !GV->isThreadLocal()) {
+ if (D->getTLSKind() == VarDecl::TLS_Dynamic)
+ CXXThreadLocals.push_back(std::make_pair(D, GV));
+ setTLSMode(GV, *D);
+ }
+
+ if (shouldBeInCOMDAT(*this, *D))
+ GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
+
// Emit the initializer function if necessary.
if (NeedsGlobalCtor || NeedsGlobalDtor)
EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
- reportGlobalToASan(GV, *D, NeedsGlobalCtor);
+ SanitizerMD->reportGlobalToASan(GV, *D, NeedsGlobalCtor);
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
@@ -1949,73 +2111,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
DI->EmitGlobalVariable(GV, D);
}
-void CodeGenModule::reportGlobalToASan(llvm::GlobalVariable *GV,
- SourceLocation Loc, StringRef Name,
- bool IsDynInit, bool IsBlacklisted) {
- if (!LangOpts.Sanitize.Address)
- return;
- IsDynInit &= !SanitizerBL.isIn(*GV, "init");
- IsBlacklisted |= SanitizerBL.isIn(*GV);
-
- llvm::GlobalVariable *LocDescr = nullptr;
- llvm::GlobalVariable *GlobalName = nullptr;
- if (!IsBlacklisted) {
- // Don't generate source location and global name if it is blacklisted -
- // it won't be instrumented anyway.
- PresumedLoc PLoc = Context.getSourceManager().getPresumedLoc(Loc);
- if (PLoc.isValid()) {
- llvm::Constant *LocData[] = {
- GetAddrOfConstantCString(PLoc.getFilename()),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- PLoc.getLine()),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- PLoc.getColumn()),
- };
- auto LocStruct = llvm::ConstantStruct::getAnon(LocData);
- LocDescr = new llvm::GlobalVariable(TheModule, LocStruct->getType(), true,
- llvm::GlobalValue::PrivateLinkage,
- LocStruct, ".asan_loc_descr");
- LocDescr->setUnnamedAddr(true);
- // Add LocDescr to llvm.compiler.used, so that it won't be removed by
- // the optimizer before the ASan instrumentation pass.
- addCompilerUsedGlobal(LocDescr);
- }
- if (!Name.empty()) {
- GlobalName = GetAddrOfConstantCString(Name);
- // GlobalName shouldn't be removed by the optimizer.
- addCompilerUsedGlobal(GlobalName);
- }
- }
-
- llvm::Value *GlobalMetadata[] = {
- GV, LocDescr, GlobalName,
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsDynInit),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsBlacklisted)};
-
- llvm::MDNode *ThisGlobal = llvm::MDNode::get(VMContext, GlobalMetadata);
- llvm::NamedMDNode *AsanGlobals =
- TheModule.getOrInsertNamedMetadata("llvm.asan.globals");
- AsanGlobals->addOperand(ThisGlobal);
-}
-
-void CodeGenModule::reportGlobalToASan(llvm::GlobalVariable *GV,
- const VarDecl &D, bool IsDynInit) {
- if (!LangOpts.Sanitize.Address)
- return;
- std::string QualName;
- llvm::raw_string_ostream OS(QualName);
- D.printQualifiedName(OS);
- reportGlobalToASan(GV, D.getLocation(), OS.str(), IsDynInit);
-}
-
-void CodeGenModule::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
- // For now, just make sure the global is not modified by the ASan
- // instrumentation.
- if (LangOpts.Sanitize.Address)
- reportGlobalToASan(GV, SourceLocation(), "", false, true);
-}
-
-static bool isVarDeclStrongDefinition(const VarDecl *D, bool NoCommon) {
+static bool isVarDeclStrongDefinition(const ASTContext &Context,
+ const VarDecl *D, bool NoCommon) {
// Don't give variables common linkage if -fno-common was specified unless it
// was overridden by a NoCommon attribute.
if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
@@ -2040,6 +2137,12 @@ static bool isVarDeclStrongDefinition(const VarDecl *D, bool NoCommon) {
if (D->hasAttr<WeakImportAttr>())
return true;
+ // Declarations with a required alignment do not have common linkage in MSVC
+ // mode.
+ if (Context.getLangOpts().MSVCCompat &&
+ (Context.isAlignmentRequired(D->getType()) || D->hasAttr<AlignedAttr>()))
+ return true;
+
return false;
}
@@ -2086,7 +2189,8 @@ llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
// C++ doesn't have tentative definitions and thus cannot have common
// linkage.
if (!getLangOpts().CPlusPlus && isa<VarDecl>(D) &&
- !isVarDeclStrongDefinition(cast<VarDecl>(D), CodeGenOpts.NoCommon))
+ !isVarDeclStrongDefinition(Context, cast<VarDecl>(D),
+ CodeGenOpts.NoCommon))
return llvm::GlobalVariable::CommonLinkage;
// selectany symbols are externally visible, so use weak instead of
@@ -2265,6 +2369,9 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
if (!GV->isDeclaration()) {
getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name);
+ GlobalDecl OldGD = Manglings.lookup(GV->getName());
+ if (auto *Prev = OldGD.getDecl())
+ getDiags().Report(Prev->getLocation(), diag::note_previous_definition);
return;
}
@@ -2314,12 +2421,21 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
// declarations).
auto *Fn = cast<llvm::Function>(GV);
setFunctionLinkage(GD, Fn);
+ if (D->hasAttr<DLLImportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
+ else if (D->hasAttr<DLLExportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
+ else
+ GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
// FIXME: this is redundant with part of setFunctionDefinitionAttributes
setGlobalVisibility(Fn, D);
MaybeHandleStaticInExternC(D, Fn);
+ if (shouldBeInCOMDAT(*this, *D))
+ Fn->setComdat(TheModule.getOrInsertComdat(Fn->getName()));
+
CodeGenFunction(*this).GenerateCode(D, Fn, FI);
setFunctionDefinitionAttributes(D, Fn);
@@ -2359,7 +2475,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
else
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
llvm::PointerType::getUnqual(DeclTy),
- nullptr);
+ /*D=*/nullptr);
// Create the new alias itself, but don't set a name yet.
auto *GA = llvm::GlobalAlias::create(
@@ -2393,21 +2509,16 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
// Set attributes which are particular to an alias; this is a
// specialization of the attributes which may be set on a global
// variable/function.
- if (D->hasAttr<DLLExportAttr>()) {
- if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
- // The dllexport attribute is ignored for undefined symbols.
- if (FD->hasBody())
- GA->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
- } else {
- GA->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
- }
- } else if (D->hasAttr<WeakAttr>() ||
- D->hasAttr<WeakRefAttr>() ||
- D->isWeakImported()) {
+ if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
+ D->isWeakImported()) {
GA->setLinkage(llvm::Function::WeakAnyLinkage);
}
- SetCommonAttributes(D, GA);
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ if (VD->getTLSKind())
+ setTLSMode(GA, *VD);
+
+ setAliasAttributes(D, GA);
}
llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
@@ -2428,7 +2539,7 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
// Check for simple case.
if (!Literal->containsNonAsciiOrNull()) {
StringLength = NumBytes;
- return Map.GetOrCreateValue(String);
+ return *Map.insert(std::make_pair(String, nullptr)).first;
}
// Otherwise, convert the UTF8 literals into a string of shorts.
@@ -2447,9 +2558,10 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
// Add an explicit null.
*ToPtr = 0;
- return Map.
- GetOrCreateValue(StringRef(reinterpret_cast<const char *>(ToBuf.data()),
- (StringLength + 1) * 2));
+ return *Map.insert(std::make_pair(
+ StringRef(reinterpret_cast<const char *>(ToBuf.data()),
+ (StringLength + 1) * 2),
+ nullptr)).first;
}
static llvm::StringMapEntry<llvm::Constant*> &
@@ -2458,7 +2570,7 @@ GetConstantStringEntry(llvm::StringMap<llvm::Constant*> &Map,
unsigned &StringLength) {
StringRef String = Literal->getString();
StringLength = String.size();
- return Map.GetOrCreateValue(String);
+ return *Map.insert(std::make_pair(String, nullptr)).first;
}
llvm::Constant *
@@ -2470,7 +2582,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
getDataLayout().isLittleEndian(),
isUTF16, StringLength);
- if (llvm::Constant *C = Entry.getValue())
+ if (auto *C = Entry.second)
return C;
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
@@ -2507,13 +2619,12 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
// String pointer.
llvm::Constant *C = nullptr;
if (isUTF16) {
- ArrayRef<uint16_t> Arr =
- llvm::makeArrayRef<uint16_t>(reinterpret_cast<uint16_t*>(
- const_cast<char *>(Entry.getKey().data())),
- Entry.getKey().size() / 2);
+ ArrayRef<uint16_t> Arr = llvm::makeArrayRef<uint16_t>(
+ reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
+ Entry.first().size() / 2);
C = llvm::ConstantDataArray::get(VMContext, Arr);
} else {
- C = llvm::ConstantDataArray::getString(VMContext, Entry.getKey());
+ C = llvm::ConstantDataArray::getString(VMContext, Entry.first());
}
// Note: -fwritable-strings doesn't make the backing store strings of
@@ -2554,7 +2665,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::GlobalVariable::PrivateLinkage, C,
"_unnamed_cfstring_");
GV->setSection("__DATA,__cfstring");
- Entry.setValue(GV);
+ Entry.second = GV;
return GV;
}
@@ -2564,8 +2675,8 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
unsigned StringLength = 0;
llvm::StringMapEntry<llvm::Constant*> &Entry =
GetConstantStringEntry(CFConstantStringMap, Literal, StringLength);
-
- if (llvm::Constant *C = Entry.getValue())
+
+ if (auto *C = Entry.second)
return C;
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
@@ -2638,8 +2749,8 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
// String pointer.
llvm::Constant *C =
- llvm::ConstantDataArray::getString(VMContext, Entry.getKey());
-
+ llvm::ConstantDataArray::getString(VMContext, Entry.first());
+
llvm::GlobalValue::LinkageTypes Linkage;
bool isConstant;
Linkage = llvm::GlobalValue::PrivateLinkage;
@@ -2670,8 +2781,8 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
GV->setSection(LangOpts.ObjCRuntime.isNonFragile()
? NSStringNonFragileABISection
: NSStringSection);
- Entry.setValue(GV);
-
+ Entry.second = GV;
+
return GV;
}
@@ -2769,7 +2880,8 @@ GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
/// constant array for the given string literal.
llvm::GlobalVariable *
-CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
+CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
+ StringRef Name) {
auto Alignment =
getContext().getAlignOfGlobalVarInChars(S->getType()).getQuantity();
@@ -2791,7 +2903,8 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
// Mangle the string literal if the ABI allows for it. However, we cannot
// do this if we are compiling with ASan or -fwritable-strings because they
// rely on strings having normal linkage.
- if (!LangOpts.WritableStrings && !LangOpts.Sanitize.Address &&
+ if (!LangOpts.WritableStrings &&
+ !LangOpts.Sanitize.has(SanitizerKind::Address) &&
getCXXABI().getMangleContext().shouldMangleStringLiteral(S)) {
llvm::raw_svector_ostream Out(MangledNameBuffer);
getCXXABI().getMangleContext().mangleStringLiteral(S, Out);
@@ -2801,14 +2914,15 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
GlobalVariableName = MangledNameBuffer;
} else {
LT = llvm::GlobalValue::PrivateLinkage;
- GlobalVariableName = ".str";
+ GlobalVariableName = Name;
}
auto GV = GenerateStringLiteral(C, LT, *this, GlobalVariableName, Alignment);
if (Entry)
*Entry = GV;
- reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>");
+ SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
+ QualType());
return GV;
}
@@ -2972,6 +3086,19 @@ static bool needsDestructMethod(ObjCImplementationDecl *impl) {
return false;
}
+static bool AllTrivialInitializers(CodeGenModule &CGM,
+ ObjCImplementationDecl *D) {
+ CodeGenFunction CGF(CGM);
+ for (ObjCImplementationDecl::init_iterator B = D->init_begin(),
+ E = D->init_end(); B != E; ++B) {
+ CXXCtorInitializer *CtorInitExp = *B;
+ Expr *Init = CtorInitExp->getInit();
+ if (!CGF.isTrivialInitializer(Init))
+ return false;
+ }
+ return true;
+}
+
/// EmitObjCIvarInitializations - Emit information for ivar initialization
/// for an implementation.
void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
@@ -2992,7 +3119,8 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
// If the implementation doesn't have any ivar initializers, we don't need
// a .cxx_construct.
- if (D->getNumIvarInitializers() == 0)
+ if (D->getNumIvarInitializers() == 0 ||
+ AllTrivialInitializers(*this, D))
return;
IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
@@ -3060,6 +3188,9 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
return;
EmitGlobal(cast<FunctionDecl>(D));
+ // Always provide some coverage mapping
+ // even for the functions that aren't emitted.
+ AddDeferredUnusedCoverageMapping(D);
break;
case Decl::Var:
@@ -3194,11 +3325,17 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
}
+ case Decl::OMPThreadPrivate:
+ EmitOMPThreadPrivateDecl(cast<OMPThreadPrivateDecl>(D));
+ break;
+
case Decl::ClassTemplateSpecialization: {
const auto *Spec = cast<ClassTemplateSpecializationDecl>(D);
if (DebugInfo &&
- Spec->getSpecializationKind() == TSK_ExplicitInstantiationDefinition)
+ Spec->getSpecializationKind() == TSK_ExplicitInstantiationDefinition &&
+ Spec->hasDefinition())
DebugInfo->completeTemplateDefinition(*Spec);
+ break;
}
default:
@@ -3206,6 +3343,91 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
// function. Need to recode Decl::Kind to do that easily.
assert(isa<TypeDecl>(D) && "Unsupported decl kind");
+ break;
+ }
+}
+
+void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
+ // Do we need to generate coverage mapping?
+ if (!CodeGenOpts.CoverageMapping)
+ return;
+ switch (D->getKind()) {
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function:
+ case Decl::ObjCMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor: {
+ if (!cast<FunctionDecl>(D)->hasBody())
+ return;
+ auto I = DeferredEmptyCoverageMappingDecls.find(D);
+ if (I == DeferredEmptyCoverageMappingDecls.end())
+ DeferredEmptyCoverageMappingDecls[D] = true;
+ break;
+ }
+ default:
+ break;
+ };
+}
+
+void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {
+ // Do we need to generate coverage mapping?
+ if (!CodeGenOpts.CoverageMapping)
+ return;
+ if (const auto *Fn = dyn_cast<FunctionDecl>(D)) {
+ if (Fn->isTemplateInstantiation())
+ ClearUnusedCoverageMapping(Fn->getTemplateInstantiationPattern());
+ }
+ auto I = DeferredEmptyCoverageMappingDecls.find(D);
+ if (I == DeferredEmptyCoverageMappingDecls.end())
+ DeferredEmptyCoverageMappingDecls[D] = false;
+ else
+ I->second = false;
+}
+
+void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
+ std::vector<const Decl *> DeferredDecls;
+ for (const auto I : DeferredEmptyCoverageMappingDecls) {
+ if (!I.second)
+ continue;
+ DeferredDecls.push_back(I.first);
+ }
+ // Sort the declarations by their location to make sure that the tests get a
+ // predictable order for the coverage mapping for the unused declarations.
+ if (CodeGenOpts.DumpCoverageMapping)
+ std::sort(DeferredDecls.begin(), DeferredDecls.end(),
+ [] (const Decl *LHS, const Decl *RHS) {
+ return LHS->getLocStart() < RHS->getLocStart();
+ });
+ for (const auto *D : DeferredDecls) {
+ switch (D->getKind()) {
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function:
+ case Decl::ObjCMethod: {
+ CodeGenPGO PGO(*this);
+ GlobalDecl GD(cast<FunctionDecl>(D));
+ PGO.emitEmptyCounterMapping(D, getMangledName(GD),
+ getFunctionLinkage(GD));
+ break;
+ }
+ case Decl::CXXConstructor: {
+ CodeGenPGO PGO(*this);
+ GlobalDecl GD(cast<CXXConstructorDecl>(D), Ctor_Base);
+ PGO.emitEmptyCounterMapping(D, getMangledName(GD),
+ getFunctionLinkage(GD));
+ break;
+ }
+ case Decl::CXXDestructor: {
+ CodeGenPGO PGO(*this);
+ GlobalDecl GD(cast<CXXDestructorDecl>(D), Dtor_Base);
+ PGO.emitEmptyCounterMapping(D, getMangledName(GD),
+ getFunctionLinkage(GD));
+ break;
+ }
+ default:
+ break;
+ };
}
}
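These three functions form a simple defer-and-cancel scheme: AddDeferredUnusedCoverageMapping tentatively marks a declaration as needing an empty coverage record, ClearUnusedCoverageMapping cancels the mark once the definition is actually instrumented, and EmitDeferredUnusedCoverageMappings walks whatever is still marked. The standalone sketch below (plain C++ with invented names, not the Clang API) illustrates the same bookkeeping pattern:

  #include <iostream>
  #include <map>
  #include <string>

  int main() {
    // Decl name -> "still needs an empty coverage mapping".
    std::map<std::string, bool> Deferred;

    auto AddDeferred = [&](const std::string &D) {
      Deferred.emplace(D, true);   // inserts only if the decl was not seen yet
    };
    auto ClearUnused = [&](const std::string &D) {
      Deferred[D] = false;         // instrumented for real; cancel the mark
    };

    AddDeferred("unusedHelper");
    AddDeferred("usedFunction");
    ClearUnused("usedFunction");

    for (const auto &P : Deferred)
      if (P.second)
        std::cout << "empty mapping for " << P.first << "\n"; // unusedHelper only
    return 0;
  }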
@@ -3226,10 +3448,9 @@ static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
// TODO: should we report variant information for ctors/dtors?
- llvm::Value *Ops[] = {
- Addr,
- GetPointerConstant(CGM.getLLVMContext(), D.getDecl())
- };
+ llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(Addr),
+ llvm::ConstantAsMetadata::get(GetPointerConstant(
+ CGM.getLLVMContext(), D.getDecl()))};
GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
@@ -3292,7 +3513,9 @@ void CodeGenFunction::EmitDeclMetadata() {
llvm::Value *Addr = I.second;
if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
- Alloca->setMetadata(DeclPtrKind, llvm::MDNode::get(Context, DAddr));
+ Alloca->setMetadata(
+ DeclPtrKind, llvm::MDNode::get(
+ Context, llvm::ValueAsMetadata::getConstant(DAddr)));
} else if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
@@ -3306,16 +3529,21 @@ void CodeGenModule::EmitVersionIdentMetadata() {
std::string Version = getClangFullVersion();
llvm::LLVMContext &Ctx = TheModule.getContext();
- llvm::Value *IdentNode[] = {
- llvm::MDString::get(Ctx, Version)
- };
+ llvm::Metadata *IdentNode[] = {llvm::MDString::get(Ctx, Version)};
IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode));
}
void CodeGenModule::EmitTargetMetadata() {
- for (auto &I : MangledDeclNames) {
- const Decl *D = I.first.getDecl()->getMostRecentDecl();
- llvm::GlobalValue *GV = GetGlobalValue(I.second);
+ // Warning, new MangledDeclNames may be appended within this loop.
+ // We rely on MapVector insertions adding new elements to the end
+ // of the container.
+ // FIXME: Move this loop into the one target that needs it, and only
+ // loop over those declarations for which we couldn't emit the target
+ // metadata when we emitted the declaration.
+ for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
+ auto Val = *(MangledDeclNames.begin() + I);
+ const Decl *D = Val.first.getDecl()->getMostRecentDecl();
+ llvm::GlobalValue *GV = GetGlobalValue(Val.second);
getTargetCodeGenInfo().emitTargetMD(D, GV, *this);
}
}
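The comment above is the crux of this rewrite: the loop body can append new entries to MangledDeclNames, so iterators must not be held across iterations; indexing is safe only because a MapVector appends new elements at the end. The same idiom with a plain std::vector (illustrative sketch, not the Clang data structure):

  #include <cstddef>
  #include <iostream>
  #include <vector>

  int main() {
    std::vector<int> Work = {1, 2, 3};
    // Index-based loop: push_back may reallocate and invalidate iterators,
    // but re-indexing on every iteration stays valid, and the appended
    // element is still visited before the loop ends.
    for (std::size_t I = 0; I != Work.size(); ++I) {
      if (Work[I] == 2)
        Work.push_back(4);
      std::cout << Work[I] << ' ';
    }
    std::cout << '\n'; // prints: 1 2 3 4
    return 0;
  }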
@@ -3329,16 +3557,14 @@ void CodeGenModule::EmitCoverageFile() {
llvm::MDString::get(Ctx, getCodeGenOpts().CoverageFile);
for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
llvm::MDNode *CU = CUNode->getOperand(i);
- llvm::Value *node[] = { CoverageFile, CU };
- llvm::MDNode *N = llvm::MDNode::get(Ctx, node);
- GCov->addOperand(N);
+ llvm::Metadata *Elts[] = {CoverageFile, CU};
+ GCov->addOperand(llvm::MDNode::get(Ctx, Elts));
}
}
}
}
-llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid,
- QualType GuidType) {
+llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid) {
// Sema has checked that all uuid strings are of the form
// "12345678-1234-1234-1234-1234567890ab".
assert(Uuid.size() == 36);
@@ -3347,6 +3573,7 @@ llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid,
else assert(isHexDigit(Uuid[i]));
}
+ // The starting offsets of the bytes of Field3 in Uuid. Field 3 is "1234-1234567890ab".
const unsigned Field3ValueOffsets[8] = { 19, 21, 24, 26, 28, 30, 32, 34 };
llvm::Constant *Field3[8];
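As a worked example of the Field3ValueOffsets table (standalone snippet, not Clang code): each offset names the first of the two hex digits that form one byte of the GUID's third field, so for the sample string from the comment above the eight bytes are 0x12 0x34 0x12 0x34 0x56 0x78 0x90 0xab.

  #include <cstdio>
  #include <cstdlib>
  #include <string>

  int main() {
    const std::string Uuid = "12345678-1234-1234-1234-1234567890ab";
    const unsigned Field3ValueOffsets[8] = {19, 21, 24, 26, 28, 30, 32, 34};
    for (unsigned Off : Field3ValueOffsets) {
      // Two hex digits starting at Off form one byte of Field3.
      unsigned Byte = std::strtoul(Uuid.substr(Off, 2).c_str(), nullptr, 16);
      std::printf("0x%02x ", Byte);
    }
    std::printf("\n"); // 0x12 0x34 0x12 0x34 0x56 0x78 0x90 0xab
    return 0;
  }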
@@ -3379,3 +3606,18 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
return getCXXABI().getAddrOfRTTIDescriptor(Ty);
}
+void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
+ for (auto RefExpr : D->varlists()) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(RefExpr)->getDecl());
+ bool PerformInit =
+ VD->getAnyInitializer() &&
+ !VD->getAnyInitializer()->isConstantInitializer(getContext(),
+ /*ForRef=*/false);
+ if (auto InitFunction =
+ getOpenMPRuntime().EmitOMPThreadPrivateVarDefinition(
+ VD, GetAddrOfGlobalVar(VD), RefExpr->getLocStart(),
+ PerformInit))
+ CXXGlobalInits.push_back(InitFunction);
+ }
+}
+
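A small source-level example of what EmitOMPThreadPrivateDecl handles (a hypothetical translation unit, compiled with -fopenmp; not part of this patch): the initializer below is not a constant expression, so PerformInit is true and the initialization function produced for the threadprivate variable is queued in CXXGlobalInits.

  // tp.cpp -- build with: clang++ -fopenmp -c tp.cpp
  extern int seed();

  int counter = seed();               // dynamic (non-constant) initializer
  #pragma omp threadprivate(counter)  // each thread gets its own copy of 'counter'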
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 9533a8dabb20..54f3a82feb2d 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -11,12 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CODEGENMODULE_H
-#define CLANG_CODEGEN_CODEGENMODULE_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENMODULE_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENMODULE_H
#include "CGVTables.h"
#include "CodeGenTypes.h"
-#include "SanitizerBlacklist.h"
+#include "SanitizerMetadata.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
@@ -25,6 +25,7 @@
#include "clang/Basic/ABI.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
+#include "clang/Basic/SanitizerBlacklist.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -72,6 +73,7 @@ class DiagnosticsEngine;
class AnnotateAttr;
class CXXDestructorDecl;
class Module;
+class CoverageSourceInfo;
namespace CodeGen {
@@ -86,6 +88,7 @@ class CGOpenMPRuntime;
class CGCUDARuntime;
class BlockFieldFlags;
class FunctionArgList;
+class CoverageMappingModuleGen;
struct OrderGlobalInits {
unsigned int priority;
@@ -147,6 +150,8 @@ struct CodeGenTypeCache {
llvm::CallingConv::ID RuntimeCC;
llvm::CallingConv::ID getRuntimeCC() const { return RuntimeCC; }
+ llvm::CallingConv::ID BuiltinCC;
+ llvm::CallingConv::ID getBuiltinCC() const { return BuiltinCC; }
};
struct RREntrypoints {
@@ -256,6 +261,7 @@ class CodeGenModule : public CodeGenTypeCache {
CodeGenModule(const CodeGenModule &) LLVM_DELETED_FUNCTION;
void operator=(const CodeGenModule &) LLVM_DELETED_FUNCTION;
+public:
struct Structor {
Structor() : Priority(0), Initializer(nullptr), AssociatedData(nullptr) {}
Structor(int Priority, llvm::Constant *Initializer,
@@ -269,6 +275,7 @@ class CodeGenModule : public CodeGenTypeCache {
typedef std::vector<Structor> CtorList;
+private:
ASTContext &Context;
const LangOptions &LangOpts;
const CodeGenOptions &CodeGenOpts;
@@ -384,10 +391,11 @@ class CodeGenModule : public CodeGenTypeCache {
/// \brief thread_local variables with initializers that need to run
/// before any thread_local variable in this TU is odr-used.
- std::vector<llvm::Constant*> CXXThreadLocalInits;
+ std::vector<llvm::Function *> CXXThreadLocalInits;
+ std::vector<llvm::GlobalVariable *> CXXThreadLocalInitVars;
/// Global variables with initializers that need to run before main.
- std::vector<llvm::Constant*> CXXGlobalInits;
+ std::vector<llvm::Function *> CXXGlobalInits;
/// When a C++ decl with an initializer is deferred, null is
/// appended to CXXGlobalInits, and the index of that null is placed
@@ -415,7 +423,7 @@ class CodeGenModule : public CodeGenTypeCache {
llvm::SetVector<clang::Module *> ImportedModules;
/// \brief A vector of metadata strings.
- SmallVector<llvm::Value *, 16> LinkerOptionsMetadata;
+ SmallVector<llvm::Metadata *, 16> LinkerOptionsMetadata;
/// @name Cache for Objective-C runtime types
/// @{
@@ -471,13 +479,18 @@ class CodeGenModule : public CodeGenTypeCache {
GlobalDecl initializedGlobalDecl;
- SanitizerBlacklist SanitizerBL;
+ std::unique_ptr<SanitizerMetadata> SanitizerMD;
/// @}
+
+ llvm::DenseMap<const Decl *, bool> DeferredEmptyCoverageMappingDecls;
+
+ std::unique_ptr<CoverageMappingModuleGen> CoverageMapping;
public:
CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
llvm::Module &M, const llvm::DataLayout &TD,
- DiagnosticsEngine &Diags);
+ DiagnosticsEngine &Diags,
+ CoverageSourceInfo *CoverageInfo = nullptr);
~CodeGenModule();
@@ -526,6 +539,10 @@ public:
InstrProfStats &getPGOStats() { return PGOStats; }
llvm::IndexedInstrProfReader *getPGOReader() const { return PGOReader.get(); }
+ CoverageMappingModuleGen *getCoverageMapping() const {
+ return CoverageMapping.get();
+ }
+
llvm::Constant *getStaticLocalDeclAddress(const VarDecl *D) {
return StaticLocalDeclMap[D];
}
@@ -534,6 +551,10 @@ public:
StaticLocalDeclMap[D] = C;
}
+ llvm::Constant *
+ getOrCreateStaticVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
llvm::GlobalVariable *getStaticLocalDeclGuardAddress(const VarDecl *D) {
return StaticLocalDeclGuardMap[D];
}
@@ -572,9 +593,7 @@ public:
llvm::MDNode *getNoObjCARCExceptionsMetadata() {
if (!NoObjCARCExceptionsMetadata)
- NoObjCARCExceptionsMetadata =
- llvm::MDNode::get(getLLVMContext(),
- SmallVector<llvm::Value*,1>());
+ NoObjCARCExceptionsMetadata = llvm::MDNode::get(getLLVMContext(), None);
return NoObjCARCExceptionsMetadata;
}
@@ -585,6 +604,9 @@ public:
DiagnosticsEngine &getDiags() const { return Diags; }
const llvm::DataLayout &getDataLayout() const { return TheDataLayout; }
const TargetInfo &getTarget() const { return Target; }
+ const llvm::Triple &getTriple() const;
+ bool supportsCOMDAT() const;
+
CGCXXABI &getCXXABI() const { return *ABI; }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
@@ -604,6 +626,9 @@ public:
return VTables.getMicrosoftVTableContext();
}
+ CtorList &getGlobalCtors() { return GlobalCtors; }
+ CtorList &getGlobalDtors() { return GlobalDtors; }
+
llvm::MDNode *getTBAAInfo(QualType QTy);
llvm::MDNode *getTBAAInfoForVTablePtr();
llvm::MDNode *getTBAAStructInfo(QualType QTy);
@@ -632,9 +657,9 @@ public:
/// Set the visibility for the given LLVM GlobalValue.
void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
- /// Set the TLS mode for the given LLVM GlobalVariable for the thread-local
+ /// Set the TLS mode for the given LLVM GlobalValue for the thread-local
/// variable declaration D.
- void setTLSMode(llvm::GlobalVariable *GV, const VarDecl &D) const;
+ void setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const;
static llvm::GlobalValue::VisibilityTypes GetLLVMVisibility(Visibility V) {
switch (V) {
@@ -647,11 +672,11 @@ public:
llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
if (isa<CXXConstructorDecl>(GD.getDecl()))
- return GetAddrOfCXXConstructor(cast<CXXConstructorDecl>(GD.getDecl()),
- GD.getCtorType());
+ return getAddrOfCXXStructor(cast<CXXConstructorDecl>(GD.getDecl()),
+ getFromCtorType(GD.getCtorType()));
else if (isa<CXXDestructorDecl>(GD.getDecl()))
- return GetAddrOfCXXDestructor(cast<CXXDestructorDecl>(GD.getDecl()),
- GD.getDtorType());
+ return getAddrOfCXXStructor(cast<CXXDestructorDecl>(GD.getDecl()),
+ getFromDtorType(GD.getDtorType()));
else if (isa<FunctionDecl>(GD.getDecl()))
return GetAddrOfFunction(GD);
else
@@ -666,6 +691,11 @@ public:
CreateOrReplaceCXXRuntimeVariable(StringRef Name, llvm::Type *Ty,
llvm::GlobalValue::LinkageTypes Linkage);
+ llvm::Function *
+ CreateGlobalInitOrDestructFunction(llvm::FunctionType *ty, const Twine &name,
+ SourceLocation Loc = SourceLocation(),
+ bool TLS = false);
+
/// Return the address space of the underlying global variable for D, as
/// determined by its declaration. Normally this is the same as the address
/// space of D's type, but in CUDA, address spaces are associated with
@@ -759,7 +789,8 @@ public:
/// Return a pointer to a constant array for the given string literal.
llvm::GlobalVariable *
- GetAddrOfConstantStringFromLiteral(const StringLiteral *S);
+ GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
+ StringRef Name = ".str");
/// Return a pointer to a constant array for the given ObjCEncodeExpr node.
llvm::GlobalVariable *
@@ -787,20 +818,19 @@ public:
/// \brief Retrieve the record type that describes the state of an
/// Objective-C fast enumeration loop (for..in).
QualType getObjCFastEnumerationStateType();
-
- /// Return the address of the constructor of the given type.
- llvm::GlobalValue *
- GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor, CXXCtorType ctorType,
- const CGFunctionInfo *fnInfo = nullptr,
- bool DontDefer = false);
- /// Return the address of the constructor of the given type.
+ // Produce code for this constructor/destructor. This method doesn't try
+ // to apply any ABI rules about which other constructors/destructors
+ // are needed or whether they are aliases of each other.
+ llvm::Function *codegenCXXStructor(const CXXMethodDecl *MD,
+ StructorType Type);
+
+ /// Return the address of the constructor/destructor of the given type.
llvm::GlobalValue *
- GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
- CXXDtorType dtorType,
- const CGFunctionInfo *fnInfo = nullptr,
- llvm::FunctionType *fnType = nullptr,
- bool DontDefer = false);
+ getAddrOfCXXStructor(const CXXMethodDecl *MD, StructorType Type,
+ const CGFunctionInfo *FnInfo = nullptr,
+ llvm::FunctionType *FnType = nullptr,
+ bool DontDefer = false);
/// Given a builtin id for a function like "__builtin_fabsf", return a
/// Function* for "fabsf".
@@ -812,6 +842,18 @@ public:
/// Emit code for a single top level declaration.
void EmitTopLevelDecl(Decl *D);
+ /// \brief Store a deferred empty coverage mapping for an unused
+ /// and thus uninstrumented top level declaration.
+ void AddDeferredUnusedCoverageMapping(Decl *D);
+
+ /// \brief Remove the deferred empty coverage mapping as this
+ /// declaration is actually instrumented.
+ void ClearUnusedCoverageMapping(const Decl *D);
+
+ /// \brief Emit all the deferred coverage mappings
+ /// for the uninstrumented functions.
+ void EmitDeferredUnusedCoverageMappings();
+
/// Tell the consumer that this variable has been instantiated.
void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
@@ -837,6 +879,11 @@ public:
StringRef Name,
llvm::AttributeSet ExtraAttrs =
llvm::AttributeSet());
+ /// Create a new compiler builtin function with the specified type and name.
+ llvm::Constant *CreateBuiltinFunction(llvm::FunctionType *Ty,
+ StringRef Name,
+ llvm::AttributeSet ExtraAttrs =
+ llvm::AttributeSet());
/// Create a new runtime global variable with the specified type and name.
llvm::Constant *CreateRuntimeVariable(llvm::Type *Ty,
StringRef Name);
@@ -910,7 +957,7 @@ public:
llvm::Function *F);
/// Set the LLVM function attributes which only apply to a function
- /// definintion.
+ /// definition.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
/// Return true iff the given type uses 'sret' when used as a return type.
@@ -1009,18 +1056,15 @@ public:
/// annotations are emitted during finalization of the LLVM code.
void AddGlobalAnnotations(const ValueDecl *D, llvm::GlobalValue *GV);
- const SanitizerBlacklist &getSanitizerBlacklist() const {
- return SanitizerBL;
- }
+ bool isInSanitizerBlacklist(llvm::Function *Fn, SourceLocation Loc) const;
- void reportGlobalToASan(llvm::GlobalVariable *GV, const VarDecl &D,
- bool IsDynInit = false);
- void reportGlobalToASan(llvm::GlobalVariable *GV, SourceLocation Loc,
- StringRef Name, bool IsDynInit = false,
- bool IsBlacklisted = false);
+ bool isInSanitizerBlacklist(llvm::GlobalVariable *GV, SourceLocation Loc,
+ QualType Ty,
+ StringRef Category = StringRef()) const;
- /// Disable sanitizer instrumentation for this global.
- void disableSanitizerForGlobal(llvm::GlobalVariable *GV);
+ SanitizerMetadata *getSanitizerMetadata() {
+ return SanitizerMD.get();
+ }
void addDeferredVTable(const CXXRecordDecl *RD) {
DeferredVTables.push_back(RD);
@@ -1030,34 +1074,50 @@ public:
/// are emitted lazily.
void EmitGlobal(GlobalDecl D);
-private:
+ bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target,
+ bool InEveryTU);
+ bool TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D);
+
+ /// Set attributes for a global definition.
+ void setFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::Function *F);
+
llvm::GlobalValue *GetGlobalValue(StringRef Ref);
+ /// Set attributes which are common to any form of a global definition (alias,
+ /// Objective-C method, function, global variable).
+ ///
+ /// NOTE: This should only be called for definitions.
+ void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
+
+ /// Set attributes which must be preserved by an alias. This includes common
+ /// attributes (i.e. it includes a call to SetCommonAttributes).
+ ///
+ /// NOTE: This should only be called for definitions.
+ void setAliasAttributes(const Decl *D, llvm::GlobalValue *GV);
+
+ void addReplacement(StringRef Name, llvm::Constant *C);
+
+ /// \brief Emit code for a threadprivate directive.
+ /// \param D Threadprivate declaration.
+ void EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D);
+
+private:
llvm::Constant *
GetOrCreateLLVMFunction(StringRef MangledName, llvm::Type *Ty, GlobalDecl D,
bool ForVTable, bool DontDefer = false,
+ bool IsThunk = false,
llvm::AttributeSet ExtraAttrs = llvm::AttributeSet());
llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::PointerType *PTy,
const VarDecl *D);
- /// Set attributes which are common to any form of a global definition (alias,
- /// Objective-C method, function, global variable).
- ///
- /// NOTE: This should only be called for definitions.
- void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
-
void setNonAliasAttributes(const Decl *D, llvm::GlobalObject *GO);
- /// Set attributes for a global definition.
- void setFunctionDefinitionAttributes(const FunctionDecl *D,
- llvm::Function *F);
-
/// Set function attributes for a function declaration.
- void SetFunctionAttributes(GlobalDecl GD,
- llvm::Function *F,
- bool IsIncompleteFunction);
+ void SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
+ bool IsIncompleteFunction, bool IsThunk);
void EmitGlobalDefinition(GlobalDecl D, llvm::GlobalValue *GV = nullptr);
@@ -1069,20 +1129,10 @@ private:
// C++ related functions.
- bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target,
- bool InEveryTU);
- bool TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D);
-
void EmitNamespace(const NamespaceDecl *D);
void EmitLinkageSpec(const LinkageSpecDecl *D);
void CompleteDIClassType(const CXXMethodDecl* D);
- /// Emit a single constructor with the given type from a C++ constructor Decl.
- void EmitCXXConstructor(const CXXConstructorDecl *D, CXXCtorType Type);
-
- /// Emit a single destructor with the given type from a C++ destructor Decl.
- void EmitCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type);
-
/// \brief Emit the function that initializes C++ thread_local variables.
void EmitCXXThreadLocalInitFunc();
@@ -1148,11 +1198,17 @@ private:
void EmitCoverageFile();
/// Emits the initializer for a uuidof string.
- llvm::Constant *EmitUuidofInitializer(StringRef uuidstr, QualType IIDType);
+ llvm::Constant *EmitUuidofInitializer(StringRef uuidstr);
+
+ /// Determine whether the definition must be emitted; if this returns \c
+ /// false, the definition can be emitted lazily if it's used.
+ bool MustBeEmitted(const ValueDecl *D);
- /// Determine if the given decl can be emitted lazily; this is only relevant
- /// for definitions. The given decl must be either a function or var decl.
- bool MayDeferGeneration(const ValueDecl *D);
+ /// Determine whether the definition can be emitted eagerly, or should be
+ /// delayed until the end of the translation unit. This is relevant for
+ /// definitions whose linkage can change, e.g. implicit function instantiations
+ /// which may later be explicitly instantiated.
+ bool MayBeEmittedEagerly(const ValueDecl *D);
/// Check whether we can use a "simpler", more core exceptions personality
/// function.
diff --git a/lib/CodeGen/CodeGenPGO.cpp b/lib/CodeGen/CodeGenPGO.cpp
index b233e3c7d75e..24b035d67598 100644
--- a/lib/CodeGen/CodeGenPGO.cpp
+++ b/lib/CodeGen/CodeGenPGO.cpp
@@ -13,8 +13,10 @@
#include "CodeGenPGO.h"
#include "CodeGenFunction.h"
+#include "CoverageMappingGen.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtVisitor.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/Endian.h"
@@ -24,8 +26,9 @@
using namespace clang;
using namespace CodeGen;
-void CodeGenPGO::setFuncName(llvm::Function *Fn) {
- RawFuncName = Fn->getName();
+void CodeGenPGO::setFuncName(StringRef Name,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ StringRef RawFuncName = Name;
// Function names may be prefixed with a binary '1' to indicate
// that the backend should not modify the symbols due to any platform
@@ -33,172 +36,44 @@ void CodeGenPGO::setFuncName(llvm::Function *Fn) {
if (RawFuncName[0] == '\1')
RawFuncName = RawFuncName.substr(1);
- if (!Fn->hasLocalLinkage()) {
- PrefixedFuncName.reset(new std::string(RawFuncName));
- return;
+ FuncName = RawFuncName;
+ if (llvm::GlobalValue::isLocalLinkage(Linkage)) {
+ // For local symbols, prepend the main file name to distinguish them.
+ // Do not include the full path in the file name since there's no guarantee
+ // that it will stay the same, e.g., if the files are checked out from
+ // version control in different locations.
+ if (CGM.getCodeGenOpts().MainFileName.empty())
+ FuncName = FuncName.insert(0, "<unknown>:");
+ else
+ FuncName = FuncName.insert(0, CGM.getCodeGenOpts().MainFileName + ":");
}
- // For local symbols, prepend the main file name to distinguish them.
- // Do not include the full path in the file name since there's no guarantee
- // that it will stay the same, e.g., if the files are checked out from
- // version control in different locations.
- PrefixedFuncName.reset(new std::string(CGM.getCodeGenOpts().MainFileName));
- if (PrefixedFuncName->empty())
- PrefixedFuncName->assign("<unknown>");
- PrefixedFuncName->append(":");
- PrefixedFuncName->append(RawFuncName);
-}
-
-static llvm::Function *getRegisterFunc(CodeGenModule &CGM) {
- return CGM.getModule().getFunction("__llvm_profile_register_functions");
-}
-
-static llvm::BasicBlock *getOrInsertRegisterBB(CodeGenModule &CGM) {
- // Don't do this for Darwin. compiler-rt uses linker magic.
- if (CGM.getTarget().getTriple().isOSDarwin())
- return nullptr;
-
- // Only need to insert this once per module.
- if (llvm::Function *RegisterF = getRegisterFunc(CGM))
- return &RegisterF->getEntryBlock();
-
- // Construct the function.
- auto *VoidTy = llvm::Type::getVoidTy(CGM.getLLVMContext());
- auto *RegisterFTy = llvm::FunctionType::get(VoidTy, false);
- auto *RegisterF = llvm::Function::Create(RegisterFTy,
- llvm::GlobalValue::InternalLinkage,
- "__llvm_profile_register_functions",
- &CGM.getModule());
- RegisterF->setUnnamedAddr(true);
- if (CGM.getCodeGenOpts().DisableRedZone)
- RegisterF->addFnAttr(llvm::Attribute::NoRedZone);
-
- // Construct and return the entry block.
- auto *BB = llvm::BasicBlock::Create(CGM.getLLVMContext(), "", RegisterF);
- CGBuilderTy Builder(BB);
- Builder.CreateRetVoid();
- return BB;
-}
-
-static llvm::Constant *getOrInsertRuntimeRegister(CodeGenModule &CGM) {
- auto *VoidTy = llvm::Type::getVoidTy(CGM.getLLVMContext());
- auto *VoidPtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- auto *RuntimeRegisterTy = llvm::FunctionType::get(VoidTy, VoidPtrTy, false);
- return CGM.getModule().getOrInsertFunction("__llvm_profile_register_function",
- RuntimeRegisterTy);
-}
-
-static bool isMachO(const CodeGenModule &CGM) {
- return CGM.getTarget().getTriple().isOSBinFormatMachO();
-}
-
-static StringRef getCountersSection(const CodeGenModule &CGM) {
- return isMachO(CGM) ? "__DATA,__llvm_prf_cnts" : "__llvm_prf_cnts";
-}
-
-static StringRef getNameSection(const CodeGenModule &CGM) {
- return isMachO(CGM) ? "__DATA,__llvm_prf_names" : "__llvm_prf_names";
+ // If we're generating a profile, create a variable for the name.
+ if (CGM.getCodeGenOpts().ProfileInstrGenerate)
+ createFuncNameVar(Linkage);
}
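To make the prefixing rule concrete, here is a standalone sketch of what setFuncName computes (invented helper, not the Clang API): local-linkage symbols are keyed in the profile as "<main file>:<name>" so that identically named static functions from different translation units stay distinct, and a missing main file name degrades to the literal "<unknown>".

  #include <iostream>
  #include <string>

  static std::string profileName(std::string Func, bool IsLocalLinkage,
                                 const std::string &MainFileName) {
    if (!Func.empty() && Func[0] == '\1')
      Func.erase(0, 1);               // drop the "don't mangle" marker
    if (IsLocalLinkage)
      Func.insert(0, (MainFileName.empty() ? std::string("<unknown>")
                                           : MainFileName) + ":");
    return Func;
  }

  int main() {
    std::cout << profileName("helper", true, "foo.c") << '\n'; // foo.c:helper
    std::cout << profileName("helper", true, "") << '\n';      // <unknown>:helper
    std::cout << profileName("main", false, "foo.c") << '\n';  // main
    return 0;
  }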
-static StringRef getDataSection(const CodeGenModule &CGM) {
- return isMachO(CGM) ? "__DATA,__llvm_prf_data" : "__llvm_prf_data";
-}
-
-llvm::GlobalVariable *CodeGenPGO::buildDataVar() {
- // Create name variable.
- llvm::LLVMContext &Ctx = CGM.getLLVMContext();
- auto *VarName = llvm::ConstantDataArray::getString(Ctx, getFuncName(),
- false);
- auto *Name = new llvm::GlobalVariable(CGM.getModule(), VarName->getType(),
- true, VarLinkage, VarName,
- getFuncVarName("name"));
- Name->setSection(getNameSection(CGM));
- Name->setAlignment(1);
-
- // Create data variable.
- auto *Int32Ty = llvm::Type::getInt32Ty(Ctx);
- auto *Int64Ty = llvm::Type::getInt64Ty(Ctx);
- auto *Int8PtrTy = llvm::Type::getInt8PtrTy(Ctx);
- auto *Int64PtrTy = llvm::Type::getInt64PtrTy(Ctx);
- llvm::Type *DataTypes[] = {
- Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int64PtrTy
- };
- auto *DataTy = llvm::StructType::get(Ctx, makeArrayRef(DataTypes));
- llvm::Constant *DataVals[] = {
- llvm::ConstantInt::get(Int32Ty, getFuncName().size()),
- llvm::ConstantInt::get(Int32Ty, NumRegionCounters),
- llvm::ConstantInt::get(Int64Ty, FunctionHash),
- llvm::ConstantExpr::getBitCast(Name, Int8PtrTy),
- llvm::ConstantExpr::getBitCast(RegionCounters, Int64PtrTy)
- };
- auto *Data =
- new llvm::GlobalVariable(CGM.getModule(), DataTy, true, VarLinkage,
- llvm::ConstantStruct::get(DataTy, DataVals),
- getFuncVarName("data"));
-
- // All the data should be packed into an array in its own section.
- Data->setSection(getDataSection(CGM));
- Data->setAlignment(8);
-
- // Hide all these symbols so that we correctly get a copy for each
- // executable. The profile format expects names and counters to be
- // contiguous, so references into shared objects would be invalid.
- if (!llvm::GlobalValue::isLocalLinkage(VarLinkage)) {
- Name->setVisibility(llvm::GlobalValue::HiddenVisibility);
- Data->setVisibility(llvm::GlobalValue::HiddenVisibility);
- RegionCounters->setVisibility(llvm::GlobalValue::HiddenVisibility);
- }
-
- // Make sure the data doesn't get deleted.
- CGM.addUsedGlobal(Data);
- return Data;
+void CodeGenPGO::setFuncName(llvm::Function *Fn) {
+ setFuncName(Fn->getName(), Fn->getLinkage());
}
-void CodeGenPGO::emitInstrumentationData() {
- if (!RegionCounters)
- return;
-
- // Build the data.
- auto *Data = buildDataVar();
-
- // Register the data.
- auto *RegisterBB = getOrInsertRegisterBB(CGM);
- if (!RegisterBB)
- return;
- CGBuilderTy Builder(RegisterBB->getTerminator());
- auto *VoidPtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- Builder.CreateCall(getOrInsertRuntimeRegister(CGM),
- Builder.CreateBitCast(Data, VoidPtrTy));
-}
-
-llvm::Function *CodeGenPGO::emitInitialization(CodeGenModule &CGM) {
- if (!CGM.getCodeGenOpts().ProfileInstrGenerate)
- return nullptr;
-
- assert(CGM.getModule().getFunction("__llvm_profile_init") == nullptr &&
- "profile initialization already emitted");
-
- // Get the function to call at initialization.
- llvm::Constant *RegisterF = getRegisterFunc(CGM);
- if (!RegisterF)
- return nullptr;
-
- // Create the initialization function.
- auto *VoidTy = llvm::Type::getVoidTy(CGM.getLLVMContext());
- auto *F = llvm::Function::Create(llvm::FunctionType::get(VoidTy, false),
- llvm::GlobalValue::InternalLinkage,
- "__llvm_profile_init", &CGM.getModule());
- F->setUnnamedAddr(true);
- F->addFnAttr(llvm::Attribute::NoInline);
- if (CGM.getCodeGenOpts().DisableRedZone)
- F->addFnAttr(llvm::Attribute::NoRedZone);
-
- // Add the basic block and the necessary calls.
- CGBuilderTy Builder(llvm::BasicBlock::Create(CGM.getLLVMContext(), "", F));
- Builder.CreateCall(RegisterF);
- Builder.CreateRetVoid();
-
- return F;
+void CodeGenPGO::createFuncNameVar(llvm::GlobalValue::LinkageTypes Linkage) {
+ // Usually, we want to match the function's linkage, but
+ // available_externally and extern_weak both have the wrong semantics.
+ if (Linkage == llvm::GlobalValue::ExternalWeakLinkage)
+ Linkage = llvm::GlobalValue::LinkOnceAnyLinkage;
+ else if (Linkage == llvm::GlobalValue::AvailableExternallyLinkage)
+ Linkage = llvm::GlobalValue::LinkOnceODRLinkage;
+
+ auto *Value =
+ llvm::ConstantDataArray::getString(CGM.getLLVMContext(), FuncName, false);
+ FuncNameVar =
+ new llvm::GlobalVariable(CGM.getModule(), Value->getType(), true, Linkage,
+ Value, "__llvm_profile_name_" + FuncName);
+
+ // Hide the symbol so that we correctly get a copy for each executable.
+ if (!llvm::GlobalValue::isLocalLinkage(FuncNameVar->getLinkage()))
+ FuncNameVar->setVisibility(llvm::GlobalValue::HiddenVisibility);
}
namespace {
@@ -778,33 +653,18 @@ uint64_t PGOHash::finalize() {
return endian::read<uint64_t, little, unaligned>(Result);
}
-static void emitRuntimeHook(CodeGenModule &CGM) {
- const char *const RuntimeVarName = "__llvm_profile_runtime";
- const char *const RuntimeUserName = "__llvm_profile_runtime_user";
- if (CGM.getModule().getGlobalVariable(RuntimeVarName))
- return;
-
- // Declare the runtime hook.
- llvm::LLVMContext &Ctx = CGM.getLLVMContext();
- auto *Int32Ty = llvm::Type::getInt32Ty(Ctx);
- auto *Var = new llvm::GlobalVariable(CGM.getModule(), Int32Ty, false,
- llvm::GlobalValue::ExternalLinkage,
- nullptr, RuntimeVarName);
-
- // Make a function that uses it.
- auto *User = llvm::Function::Create(llvm::FunctionType::get(Int32Ty, false),
- llvm::GlobalValue::LinkOnceODRLinkage,
- RuntimeUserName, &CGM.getModule());
- User->addFnAttr(llvm::Attribute::NoInline);
- if (CGM.getCodeGenOpts().DisableRedZone)
- User->addFnAttr(llvm::Attribute::NoRedZone);
- CGBuilderTy Builder(llvm::BasicBlock::Create(CGM.getLLVMContext(), "", User));
- auto *Load = Builder.CreateLoad(Var);
- Builder.CreateRet(Load);
-
- // Create a use of the function. Now the definition of the runtime variable
- // should get pulled in, along with any static initializears.
- CGM.addUsedGlobal(User);
+void CodeGenPGO::checkGlobalDecl(GlobalDecl GD) {
+ // Make sure we only emit coverage mapping for one constructor/destructor.
+ // Clang emits several functions for the constructor and the destructor of
+ // a class. Every function is instrumented, but we only want to provide
+ // coverage for one of them. Because of that we only emit the coverage mapping
+ // for the base constructor/destructor.
+ if ((isa<CXXConstructorDecl>(GD.getDecl()) &&
+ GD.getCtorType() != Ctor_Base) ||
+ (isa<CXXDestructorDecl>(GD.getDecl()) &&
+ GD.getDtorType() != Dtor_Base)) {
+ SkipCoverageMapping = true;
+ }
}
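For a concrete case (assuming the Itanium C++ ABI, where Clang emits both a complete-object and a base-object constructor for the class below), checkGlobalDecl sets SkipCoverageMapping on the complete-object variant so the class contributes exactly one coverage record:

  struct Widget {
    Widget() {}   // both ctor variants are instrumented with counters,
  };              // but only the base variant carries the coverage mapping

  Widget makeWidget() { return Widget(); }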
void CodeGenPGO::assignRegionCounters(const Decl *D, llvm::Function *Fn) {
@@ -814,28 +674,12 @@ void CodeGenPGO::assignRegionCounters(const Decl *D, llvm::Function *Fn) {
return;
if (D->isImplicit())
return;
+ CGM.ClearUnusedCoverageMapping(D);
setFuncName(Fn);
- // Set the linkage for variables based on the function linkage. Usually, we
- // want to match it, but available_externally and extern_weak both have the
- // wrong semantics.
- VarLinkage = Fn->getLinkage();
- switch (VarLinkage) {
- case llvm::GlobalValue::ExternalWeakLinkage:
- VarLinkage = llvm::GlobalValue::LinkOnceAnyLinkage;
- break;
- case llvm::GlobalValue::AvailableExternallyLinkage:
- VarLinkage = llvm::GlobalValue::LinkOnceODRLinkage;
- break;
- default:
- break;
- }
-
mapRegionCounters(D);
- if (InstrumentRegions) {
- emitRuntimeHook(CGM);
- emitCounterVariables();
- }
+ if (CGM.getCodeGenOpts().CoverageMapping)
+ emitCounterRegionMapping(D);
if (PGOReader) {
SourceManager &SM = CGM.getContext().getSourceManager();
loadRegionCounts(PGOReader, SM.isInMainFile(D->getLocation()));
@@ -860,6 +704,56 @@ void CodeGenPGO::mapRegionCounters(const Decl *D) {
FunctionHash = Walker.Hash.finalize();
}
+void CodeGenPGO::emitCounterRegionMapping(const Decl *D) {
+ if (SkipCoverageMapping)
+ return;
+ // Don't map the functions inside the system headers
+ auto Loc = D->getBody()->getLocStart();
+ if (CGM.getContext().getSourceManager().isInSystemHeader(Loc))
+ return;
+
+ std::string CoverageMapping;
+ llvm::raw_string_ostream OS(CoverageMapping);
+ CoverageMappingGen MappingGen(*CGM.getCoverageMapping(),
+ CGM.getContext().getSourceManager(),
+ CGM.getLangOpts(), RegionCounterMap.get());
+ MappingGen.emitCounterMapping(D, OS);
+ OS.flush();
+
+ if (CoverageMapping.empty())
+ return;
+
+ CGM.getCoverageMapping()->addFunctionMappingRecord(
+ FuncNameVar, FuncName, FunctionHash, CoverageMapping);
+}
+
+void
+CodeGenPGO::emitEmptyCounterMapping(const Decl *D, StringRef FuncName,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ if (SkipCoverageMapping)
+ return;
+ setFuncName(FuncName, Linkage);
+
+ // Don't map the functions inside the system headers
+ auto Loc = D->getBody()->getLocStart();
+ if (CGM.getContext().getSourceManager().isInSystemHeader(Loc))
+ return;
+
+ std::string CoverageMapping;
+ llvm::raw_string_ostream OS(CoverageMapping);
+ CoverageMappingGen MappingGen(*CGM.getCoverageMapping(),
+ CGM.getContext().getSourceManager(),
+ CGM.getLangOpts());
+ MappingGen.emitEmptyMapping(D, OS);
+ OS.flush();
+
+ if (CoverageMapping.empty())
+ return;
+
+ CGM.getCoverageMapping()->addFunctionMappingRecord(
+ FuncNameVar, FuncName, FunctionHash, CoverageMapping);
+}
+
void CodeGenPGO::computeRegionCounts(const Decl *D) {
StmtCountMap.reset(new llvm::DenseMap<const Stmt *, uint64_t>);
ComputeRegionCounts Walker(*StmtCountMap, *this);
@@ -891,50 +785,36 @@ CodeGenPGO::applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
Fn->addFnAttr(llvm::Attribute::Cold);
}
-void CodeGenPGO::emitCounterVariables() {
- llvm::LLVMContext &Ctx = CGM.getLLVMContext();
- llvm::ArrayType *CounterTy = llvm::ArrayType::get(llvm::Type::getInt64Ty(Ctx),
- NumRegionCounters);
- RegionCounters =
- new llvm::GlobalVariable(CGM.getModule(), CounterTy, false, VarLinkage,
- llvm::Constant::getNullValue(CounterTy),
- getFuncVarName("counters"));
- RegionCounters->setAlignment(8);
- RegionCounters->setSection(getCountersSection(CGM));
-}
-
void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, unsigned Counter) {
- if (!RegionCounters)
+ if (!CGM.getCodeGenOpts().ProfileInstrGenerate || !RegionCounterMap)
return;
- llvm::Value *Addr =
- Builder.CreateConstInBoundsGEP2_64(RegionCounters, 0, Counter);
- llvm::Value *Count = Builder.CreateLoad(Addr, "pgocount");
- Count = Builder.CreateAdd(Count, Builder.getInt64(1));
- Builder.CreateStore(Count, Addr);
+ if (!Builder.GetInsertPoint())
+ return;
+ auto *I8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ Builder.CreateCall4(CGM.getIntrinsic(llvm::Intrinsic::instrprof_increment),
+ llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
+ Builder.getInt64(FunctionHash),
+ Builder.getInt32(NumRegionCounters),
+ Builder.getInt32(Counter));
}
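Conceptually, the intrinsic call above replaces the old hand-built load/add/store on a per-function counter array: the front end now only names the counter (function name variable, function hash, number of counters, counter index) and leaves materializing the counters and profile sections to LLVM. A rough model of the observable effect (plain C++ sketch, deliberately ignoring the real lowering):

  #include <cstdint>
  #include <cstdio>

  static const unsigned NumRegionCounters = 4;
  static uint64_t Counters[NumRegionCounters]; // one slot per source region

  static void instrprofIncrement(unsigned Counter) { ++Counters[Counter]; }

  int main() {
    instrprofIncrement(0); // e.g. function entry
    instrprofIncrement(2); // e.g. the taken branch of an 'if'
    std::printf("%llu %llu\n", (unsigned long long)Counters[0],
                (unsigned long long)Counters[2]); // 1 1
    return 0;
  }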
void CodeGenPGO::loadRegionCounts(llvm::IndexedInstrProfReader *PGOReader,
bool IsInMainFile) {
CGM.getPGOStats().addVisited(IsInMainFile);
- RegionCounts.reset(new std::vector<uint64_t>);
- uint64_t Hash;
- if (PGOReader->getFunctionCounts(getFuncName(), Hash, *RegionCounts)) {
- CGM.getPGOStats().addMissing(IsInMainFile);
- RegionCounts.reset();
- } else if (Hash != FunctionHash ||
- RegionCounts->size() != NumRegionCounters) {
- CGM.getPGOStats().addMismatched(IsInMainFile);
- RegionCounts.reset();
+ RegionCounts.clear();
+ if (std::error_code EC =
+ PGOReader->getFunctionCounts(FuncName, FunctionHash, RegionCounts)) {
+ if (EC == llvm::instrprof_error::unknown_function)
+ CGM.getPGOStats().addMissing(IsInMainFile);
+ else if (EC == llvm::instrprof_error::hash_mismatch)
+ CGM.getPGOStats().addMismatched(IsInMainFile);
+ else if (EC == llvm::instrprof_error::malformed)
+ // TODO: Consider a more specific warning for this case.
+ CGM.getPGOStats().addMismatched(IsInMainFile);
+ RegionCounts.clear();
}
}
-void CodeGenPGO::destroyRegionCounters() {
- RegionCounterMap.reset();
- StmtCountMap.reset();
- RegionCounts.reset();
- RegionCounters = nullptr;
-}
-
/// \brief Calculate what to divide by to scale weights.
///
/// Given the maximum weight, calculate a divisor that will scale all the
diff --git a/lib/CodeGen/CodeGenPGO.h b/lib/CodeGen/CodeGenPGO.h
index 2f4aa660bea3..431c850ef81e 100644
--- a/lib/CodeGen/CodeGenPGO.h
+++ b/lib/CodeGen/CodeGenPGO.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CODEGENPGO_H
-#define CLANG_CODEGEN_CODEGENPGO_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENPGO_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENPGO_H
#include "CGBuilder.h"
#include "CodeGenModule.h"
@@ -31,34 +31,28 @@ class RegionCounter;
class CodeGenPGO {
private:
CodeGenModule &CGM;
- std::unique_ptr<std::string> PrefixedFuncName;
- StringRef RawFuncName;
- llvm::GlobalValue::LinkageTypes VarLinkage;
+ std::string FuncName;
+ llvm::GlobalVariable *FuncNameVar;
unsigned NumRegionCounters;
uint64_t FunctionHash;
- llvm::GlobalVariable *RegionCounters;
std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionCounterMap;
std::unique_ptr<llvm::DenseMap<const Stmt *, uint64_t>> StmtCountMap;
- std::unique_ptr<std::vector<uint64_t>> RegionCounts;
+ std::vector<uint64_t> RegionCounts;
uint64_t CurrentRegionCount;
+ /// \brief A flag that is set to true when this function doesn't need
+ /// to have coverage mapping data.
+ bool SkipCoverageMapping;
public:
CodeGenPGO(CodeGenModule &CGM)
- : CGM(CGM), NumRegionCounters(0), FunctionHash(0),
- RegionCounters(nullptr), CurrentRegionCount(0) {}
+ : CGM(CGM), NumRegionCounters(0), FunctionHash(0), CurrentRegionCount(0),
+ SkipCoverageMapping(false) {}
/// Whether or not we have PGO region data for the current function. This is
/// false both when we have no data at all and when our data has been
/// discarded.
- bool haveRegionCounts() const { return RegionCounts != nullptr; }
-
- /// Get the string used to identify this function in the profile data.
- /// For functions with local linkage, this includes the main file name.
- StringRef getFuncName() const { return StringRef(*PrefixedFuncName); }
- std::string getFuncVarName(StringRef VarName) const {
- return ("__llvm_profile_" + VarName + "_" + RawFuncName).str();
- }
+ bool haveRegionCounts() const { return !RegionCounts.empty(); }
/// Return the counter value of the current region.
uint64_t getCurrentRegionCount() const { return CurrentRegionCount; }
@@ -99,21 +93,21 @@ public:
llvm::MDNode *createBranchWeights(ArrayRef<uint64_t> Weights);
llvm::MDNode *createLoopWeights(const Stmt *Cond, RegionCounter &Cnt);
+ /// Check if we need to emit coverage mapping for a given declaration
+ void checkGlobalDecl(GlobalDecl GD);
/// Assign counters to regions and configure them for PGO of a given
/// function. Does nothing if instrumentation is not enabled and either
/// generates global variables or associates PGO data with each of the
/// counters depending on whether we are generating or using instrumentation.
void assignRegionCounters(const Decl *D, llvm::Function *Fn);
- /// Emit static data structures for instrumentation data.
- void emitInstrumentationData();
- /// Clean up region counter state. Must be called if assignRegionCounters is
- /// used.
- void destroyRegionCounters();
- /// Emit static initialization code, if any.
- static llvm::Function *emitInitialization(CodeGenModule &CGM);
-
+ /// Emit a coverage mapping range with a counter zero
+ /// for an unused declaration.
+ void emitEmptyCounterMapping(const Decl *D, StringRef FuncName,
+ llvm::GlobalValue::LinkageTypes Linkage);
private:
void setFuncName(llvm::Function *Fn);
+ void setFuncName(StringRef Name, llvm::GlobalValue::LinkageTypes Linkage);
+ void createFuncNameVar(llvm::GlobalValue::LinkageTypes Linkage);
void mapRegionCounters(const Decl *D);
void computeRegionCounts(const Decl *D);
void applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
@@ -121,7 +115,7 @@ private:
void loadRegionCounts(llvm::IndexedInstrProfReader *PGOReader,
bool IsInMainFile);
void emitCounterVariables();
- llvm::GlobalVariable *buildDataVar();
+ void emitCounterRegionMapping(const Decl *D);
/// Emit code to increment the counter at the given index
void emitCounterIncrement(CGBuilderTy &Builder, unsigned Counter);
@@ -138,7 +132,7 @@ private:
uint64_t getRegionCount(unsigned Counter) {
if (!haveRegionCounts())
return 0;
- return (*RegionCounts)[Counter];
+ return RegionCounts[Counter];
}
friend class RegionCounter;
diff --git a/lib/CodeGen/CodeGenTBAA.h b/lib/CodeGen/CodeGenTBAA.h
index 0ad4be24fd24..632caddce980 100644
--- a/lib/CodeGen/CodeGenTBAA.h
+++ b/lib/CodeGen/CodeGenTBAA.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CODEGENTBAA_H
-#define CLANG_CODEGEN_CODEGENTBAA_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTBAA_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENTBAA_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index d4e22623e029..67a9fbec2690 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -81,7 +81,7 @@ void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
-llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T){
+llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
llvm::Type *R = ConvertType(T);
// If this is a non-bool type, don't map it.
@@ -115,8 +115,9 @@ isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
// If we have already checked this type (maybe the same type is used by-value
// multiple times in multiple structure fields, don't check again.
- if (!AlreadyChecked.insert(RD)) return true;
-
+ if (!AlreadyChecked.insert(RD).second)
+ return true;
+
const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
// If this type is already laid out, converting it is a noop.
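The .second changes here and below reflect an LLVM API change in this release: set-like insert() now returns an (iterator, inserted) pair, matching the standard containers, instead of a bare bool. The equivalent test with std::set (illustrative only):

  #include <cassert>
  #include <set>

  int main() {
    std::set<int> Seen;
    assert(Seen.insert(42).second);  // first insertion: inserted == true
    assert(!Seen.insert(42).second); // duplicate: inserted == false
    return 0;
  }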
@@ -187,6 +188,11 @@ static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
/// we've temporarily deferred expanding the type because we're in a recursive
/// context.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
+ // Some ABIs cannot have their member pointers represented in IR unless
+ // certain circumstances have been reached.
+ if (const auto *MPT = Ty->getAs<MemberPointerType>())
+ return getCXXABI().isMemberPointerConvertible(MPT);
+
// If this isn't a tagged type, we can convert it!
const TagType *TT = Ty->getAs<TagType>();
if (!TT) return true;
@@ -194,7 +200,7 @@ bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
// Incomplete types cannot be converted.
if (TT->isIncompleteType())
return false;
-
+
// If this is an enum, then it is always safe to convert.
const RecordType *RT = dyn_cast<RecordType>(TT);
if (!RT) return true;
@@ -353,9 +359,10 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case BuiltinType::Half:
// Half FP can either be storage-only (lowered to i16) or native.
- ResultType = getTypeForFormat(getLLVMContext(),
- Context.getFloatTypeSemantics(T),
- Context.getLangOpts().NativeHalfType);
+ ResultType =
+ getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
+ Context.getLangOpts().NativeHalfType ||
+ Context.getLangOpts().HalfArgsAndReturns);
break;
case BuiltinType::Float:
case BuiltinType::Double:
@@ -399,7 +406,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
llvm_unreachable("Unexpected undeduced auto type!");
case Type::Complex: {
llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
- ResultType = llvm::StructType::get(EltTy, EltTy, NULL);
+ ResultType = llvm::StructType::get(EltTy, EltTy, nullptr);
break;
}
case Type::LValueReference:
@@ -494,7 +501,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// While we're converting the parameter types for a function, we don't want
// to recursively convert any pointed-to structs. Converting directly-used
// structs is ok though.
- if (!RecordsBeingLaidOut.insert(Ty)) {
+ if (!RecordsBeingLaidOut.insert(Ty).second) {
ResultType = llvm::StructType::get(getLLVMContext());
SkippedLayout = true;
@@ -581,6 +588,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
case Type::MemberPointer: {
+ if (!getCXXABI().isMemberPointerConvertible(cast<MemberPointerType>(Ty)))
+ return llvm::StructType::create(getLLVMContext());
ResultType =
getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty));
break;
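The bail-out above matters chiefly for the Microsoft C++ ABI, where the in-memory representation of a pointer to member depends on the inheritance model of its class; until that is known, the member pointer is lowered to an opaque, empty struct type. One situation that can trigger it, sketched hypothetically:

  struct Incomplete;                      // no definition, no inheritance model yet
  void consume(int Incomplete::*Member);  // declaration only: the parameter's
                                          // member-pointer layout is still open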
@@ -648,7 +657,8 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
}
// Okay, this is a definition of a type. Compile the implementation now.
- bool InsertResult = RecordsBeingLaidOut.insert(Key); (void)InsertResult;
+ bool InsertResult = RecordsBeingLaidOut.insert(Key).second;
+ (void)InsertResult;
assert(InsertResult && "Recursively compiling a struct?");
// Force conversion of non-virtual base classes recursively.
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index fe155b53b3c4..64c5799ccec9 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CODEGENTYPES_H
-#define CLANG_CODEGEN_CODEGENTYPES_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H
#include "CGCall.h"
#include "clang/AST/GlobalDecl.h"
@@ -22,39 +22,95 @@
#include <vector>
namespace llvm {
- class FunctionType;
- class Module;
- class DataLayout;
- class Type;
- class LLVMContext;
- class StructType;
+class FunctionType;
+class Module;
+class DataLayout;
+class Type;
+class LLVMContext;
+class StructType;
}
namespace clang {
- class ABIInfo;
- class ASTContext;
- template <typename> class CanQual;
- class CXXConstructorDecl;
- class CXXDestructorDecl;
- class CXXMethodDecl;
- class CodeGenOptions;
- class FieldDecl;
- class FunctionProtoType;
- class ObjCInterfaceDecl;
- class ObjCIvarDecl;
- class PointerType;
- class QualType;
- class RecordDecl;
- class TagDecl;
- class TargetInfo;
- class Type;
- typedef CanQual<Type> CanQualType;
+class ABIInfo;
+class ASTContext;
+template <typename> class CanQual;
+class CXXConstructorDecl;
+class CXXDestructorDecl;
+class CXXMethodDecl;
+class CodeGenOptions;
+class FieldDecl;
+class FunctionProtoType;
+class ObjCInterfaceDecl;
+class ObjCIvarDecl;
+class PointerType;
+class QualType;
+class RecordDecl;
+class TagDecl;
+class TargetInfo;
+class Type;
+typedef CanQual<Type> CanQualType;
namespace CodeGen {
- class CGCXXABI;
- class CGRecordLayout;
- class CodeGenModule;
- class RequiredArgs;
+class CGCXXABI;
+class CGRecordLayout;
+class CodeGenModule;
+class RequiredArgs;
+
+enum class StructorType {
+ Complete, // constructor or destructor
+ Base, // constructor or destructor
+ Deleting // destructor only
+};
+
+inline CXXCtorType toCXXCtorType(StructorType T) {
+ switch (T) {
+ case StructorType::Complete:
+ return Ctor_Complete;
+ case StructorType::Base:
+ return Ctor_Base;
+ case StructorType::Deleting:
+ llvm_unreachable("cannot have a deleting ctor");
+ }
+ llvm_unreachable("not a StructorType");
+}
+
+inline StructorType getFromCtorType(CXXCtorType T) {
+ switch (T) {
+ case Ctor_Complete:
+ return StructorType::Complete;
+ case Ctor_Base:
+ return StructorType::Base;
+ case Ctor_Comdat:
+ llvm_unreachable("not expecting a COMDAT");
+ }
+ llvm_unreachable("not a CXXCtorType");
+}
+
+inline CXXDtorType toCXXDtorType(StructorType T) {
+ switch (T) {
+ case StructorType::Complete:
+ return Dtor_Complete;
+ case StructorType::Base:
+ return Dtor_Base;
+ case StructorType::Deleting:
+ return Dtor_Deleting;
+ }
+ llvm_unreachable("not a StructorType");
+}
+
+inline StructorType getFromDtorType(CXXDtorType T) {
+ switch (T) {
+ case Dtor_Deleting:
+ return StructorType::Deleting;
+ case Dtor_Complete:
+ return StructorType::Complete;
+ case Dtor_Base:
+ return StructorType::Base;
+ case Dtor_Comdat:
+ llvm_unreachable("not expecting a COMDAT");
+ }
+ llvm_unreachable("not a CXXDtorType");
+}
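A brief usage note on the helpers above (assumes the CXXCtorType/CXXDtorType enumerators from clang/Basic/ABI.h and these inline functions are in scope; not a standalone program): the conversions round-trip for the kinds that exist on both sides, while Ctor_Comdat, Dtor_Comdat, and a "deleting constructor" are rejected with llvm_unreachable.

  // e.g. somewhere inside clang::CodeGen, after including CodeGenTypes.h:
  assert(toCXXCtorType(getFromCtorType(Ctor_Complete)) == Ctor_Complete);
  assert(toCXXCtorType(getFromCtorType(Ctor_Base)) == Ctor_Base);
  assert(toCXXDtorType(getFromDtorType(Dtor_Deleting)) == Dtor_Deleting);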
/// CodeGenTypes - This class organizes the cross-module state that is used
/// while lowering AST types to LLVM types.
@@ -185,18 +241,15 @@ public:
QualType receiverType);
const CGFunctionInfo &arrangeCXXMethodDeclaration(const CXXMethodDecl *MD);
- const CGFunctionInfo &arrangeCXXConstructorDeclaration(
- const CXXConstructorDecl *D,
- CXXCtorType Type);
+ const CGFunctionInfo &arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
+ StructorType Type);
const CGFunctionInfo &arrangeCXXConstructorCall(const CallArgList &Args,
const CXXConstructorDecl *D,
CXXCtorType CtorKind,
unsigned ExtraArgs);
- const CGFunctionInfo &arrangeCXXDestructor(const CXXDestructorDecl *D,
- CXXDtorType Type);
-
const CGFunctionInfo &arrangeFreeFunctionCall(const CallArgList &Args,
- const FunctionType *Ty);
+ const FunctionType *Ty,
+ bool ChainCall);
const CGFunctionInfo &arrangeFreeFunctionCall(QualType ResTy,
const CallArgList &args,
FunctionType::ExtInfo info,
@@ -207,6 +260,7 @@ public:
const CGFunctionInfo &arrangeCXXMethodCall(const CallArgList &args,
const FunctionProtoType *type,
RequiredArgs required);
+ const CGFunctionInfo &arrangeMSMemberPointerThunk(const CXXMethodDecl *MD);
const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty);
const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionNoProtoType> Ty);
@@ -220,7 +274,8 @@ public:
///
/// \param argTypes - must all actually be canonical as params
const CGFunctionInfo &arrangeLLVMFunctionInfo(CanQualType returnType,
- bool IsInstanceMethod,
+ bool instanceMethod,
+ bool chainCall,
ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
RequiredArgs args);
@@ -239,11 +294,10 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *ConvertRecordDeclType(const RecordDecl *TD);
- /// GetExpandedTypes - Expand the type \arg Ty into the LLVM
- /// argument types it would be passed as on the provided vector \arg
- /// ArgTys. See ABIArgInfo::Expand.
- void GetExpandedTypes(QualType type,
- SmallVectorImpl<llvm::Type*> &expanded);
+ /// getExpandedTypes - Expand the type \arg Ty into the LLVM
+ /// argument types it would be passed as. See ABIArgInfo::Expand.
+ void getExpandedTypes(QualType Ty,
+ SmallVectorImpl<llvm::Type *>::iterator &TI);
/// IsZeroInitializable - Return whether a type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
diff --git a/lib/CodeGen/CoverageMappingGen.cpp b/lib/CodeGen/CoverageMappingGen.cpp
new file mode 100644
index 000000000000..6f159d47375e
--- /dev/null
+++ b/lib/CodeGen/CoverageMappingGen.cpp
@@ -0,0 +1,1174 @@
+//===--- CoverageMappingGen.cpp - Coverage mapping generation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Instrumentation-based code coverage mapping generator
+//
+//===----------------------------------------------------------------------===//
+
+#include "CoverageMappingGen.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ProfileData/CoverageMapping.h"
+#include "llvm/ProfileData/CoverageMappingReader.h"
+#include "llvm/ProfileData/CoverageMappingWriter.h"
+#include "llvm/ProfileData/InstrProfReader.h"
+#include "llvm/Support/FileSystem.h"
+
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm::coverage;
+
+void CoverageSourceInfo::SourceRangeSkipped(SourceRange Range) {
+ SkippedRanges.push_back(Range);
+}
+
+namespace {
+
+/// \brief A region of source code that can be mapped to a counter.
+class SourceMappingRegion {
+public:
+ enum RegionFlags {
+ /// \brief This region won't be emitted if it wasn't extended.
+ /// This is useful so that we won't emit source ranges for single tokens
+ /// that we don't really care that much about, like:
+ /// the '(' token in #define MACRO (
+ IgnoreIfNotExtended = 0x0001,
+ };
+
+private:
+ FileID File, MacroArgumentFile;
+
+ Counter Count;
+
+ /// \brief A statement that initiated the count of Zero.
+ ///
+ /// This initiator statement is useful to prevent merging of unreachable
+ /// regions with different statements that caused the counter to become
+ /// unreachable.
+ const Stmt *UnreachableInitiator;
+
+ /// \brief A statement that separates certain mapping regions into groups.
+ ///
+ /// The group statement is sometimes useful when we are emitting the source
+ /// regions not in their correct lexical order, e.g. the regions for the
+ /// incrementation expression in the 'for' construct. By marking the regions
+ /// in the incrementation expression with the group statement, we avoid the
+ /// merging of the regions from the incrementation expression and the loop's
+ /// body.
+ const Stmt *Group;
+
+ /// \brief The region's starting location.
+ SourceLocation LocStart;
+
+ /// \brief The region's ending location.
+ SourceLocation LocEnd, AlternativeLocEnd;
+ unsigned Flags;
+
+public:
+ SourceMappingRegion(FileID File, FileID MacroArgumentFile, Counter Count,
+ const Stmt *UnreachableInitiator, const Stmt *Group,
+ SourceLocation LocStart, SourceLocation LocEnd,
+ unsigned Flags = 0)
+ : File(File), MacroArgumentFile(MacroArgumentFile), Count(Count),
+ UnreachableInitiator(UnreachableInitiator), Group(Group),
+ LocStart(LocStart), LocEnd(LocEnd), AlternativeLocEnd(LocStart),
+ Flags(Flags) {}
+
+ const FileID &getFile() const { return File; }
+
+ const Counter &getCounter() const { return Count; }
+
+ const SourceLocation &getStartLoc() const { return LocStart; }
+
+ const SourceLocation &getEndLoc(const SourceManager &SM) const {
+ if (SM.getFileID(LocEnd) != File)
+ return AlternativeLocEnd;
+ return LocEnd;
+ }
+
+ bool hasFlag(RegionFlags Flag) const { return (Flags & Flag) != 0; }
+
+ void setFlag(RegionFlags Flag) { Flags |= Flag; }
+
+ void clearFlag(RegionFlags Flag) { Flags &= ~Flag; }
+
+ /// \brief Return true if two regions can be merged together.
+ bool isMergeable(SourceMappingRegion &R) {
+ // FIXME: We allow merging regions with a gap in between them. Should we?
+ return File == R.File && MacroArgumentFile == R.MacroArgumentFile &&
+ Count == R.Count && UnreachableInitiator == R.UnreachableInitiator &&
+ Group == R.Group;
+ }
+
+ /// \brief A comparison that sorts such that mergeable regions are adjacent.
+ friend bool operator<(const SourceMappingRegion &LHS,
+ const SourceMappingRegion &RHS) {
+ return std::tie(LHS.File, LHS.MacroArgumentFile, LHS.Count,
+ LHS.UnreachableInitiator, LHS.Group) <
+ std::tie(RHS.File, RHS.MacroArgumentFile, RHS.Count,
+ RHS.UnreachableInitiator, RHS.Group);
+ }
+};
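
The isMergeable()/operator< pair above is a standard sort-then-coalesce setup: operator< orders regions by exactly the fields that isMergeable() compares, so after a std::sort every group of mergeable regions is contiguous and can be merged in one linear pass (see emitSourceRegions() further down). A minimal, self-contained sketch of the same idea over a simplified region type; the type and field names here are illustrative only, not the ones used by this file:

#include <algorithm>
#include <tuple>
#include <vector>

// Simplified, illustrative stand-in for SourceMappingRegion.
struct SimpleRegion {
  int File, Count;   // the "merge key" fields
  int Start, End;    // the extent that gets widened when merging
  bool mergeable(const SimpleRegion &R) const {
    return File == R.File && Count == R.Count;
  }
  friend bool operator<(const SimpleRegion &L, const SimpleRegion &R) {
    // Lexicographic order on the merge key keeps mergeable regions adjacent.
    return std::tie(L.File, L.Count) < std::tie(R.File, R.Count);
  }
};

// Sort, then merge runs of adjacent mergeable regions in a single pass.
std::vector<SimpleRegion> coalesce(std::vector<SimpleRegion> Regions) {
  std::sort(Regions.begin(), Regions.end());
  std::vector<SimpleRegion> Out;
  for (const SimpleRegion &R : Regions) {
    if (!Out.empty() && Out.back().mergeable(R)) {
      Out.back().Start = std::min(Out.back().Start, R.Start);
      Out.back().End = std::max(Out.back().End, R.End);
    } else {
      Out.push_back(R);
    }
  }
  return Out;
}
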
+
+/// \brief Provides the common functionality for the different
+/// coverage mapping region builders.
+class CoverageMappingBuilder {
+public:
+ CoverageMappingModuleGen &CVM;
+ SourceManager &SM;
+ const LangOptions &LangOpts;
+
+private:
+ struct FileInfo {
+ /// \brief The file id that will be used by the coverage mapping system.
+ unsigned CovMappingFileID;
+ const FileEntry *Entry;
+
+ FileInfo(unsigned CovMappingFileID, const FileEntry *Entry)
+ : CovMappingFileID(CovMappingFileID), Entry(Entry) {}
+ };
+
+ /// \brief Maps clang's FileIDs to the file ids used by the coverage mapping
+ /// system, together with the corresponding file entries.
+ llvm::SmallDenseMap<FileID, FileInfo, 8> FileIDMapping;
+
+public:
+ /// \brief The statement that corresponds to the current source group.
+ const Stmt *CurrentSourceGroup;
+
+ /// \brief The statement that initiated the current unreachable region.
+ const Stmt *CurrentUnreachableRegionInitiator;
+
+ /// \brief The coverage mapping regions for this function
+ llvm::SmallVector<CounterMappingRegion, 32> MappingRegions;
+ /// \brief The source mapping regions for this function.
+ std::vector<SourceMappingRegion> SourceRegions;
+
+ CoverageMappingBuilder(CoverageMappingModuleGen &CVM, SourceManager &SM,
+ const LangOptions &LangOpts)
+ : CVM(CVM), SM(SM), LangOpts(LangOpts),
+ CurrentSourceGroup(nullptr),
+ CurrentUnreachableRegionInitiator(nullptr) {}
+
+ /// \brief Return the precise end location for the given token.
+ SourceLocation getPreciseTokenLocEnd(SourceLocation Loc) {
+ return Lexer::getLocForEndOfToken(SM.getSpellingLoc(Loc), 0, SM, LangOpts);
+ }
+
+ /// \brief Create the mapping that maps from the function's file ids to
+ /// the indices for the translation unit's filenames.
+ void createFileIDMapping(SmallVectorImpl<unsigned> &Mapping) {
+ Mapping.resize(FileIDMapping.size(), 0);
+ for (const auto &I : FileIDMapping)
+ Mapping[I.second.CovMappingFileID] = CVM.getFileID(I.second.Entry);
+ }
+
+ /// \brief Get the coverage mapping file id that corresponds to the given
+ /// clang file id. If no such id exists yet, it is created and added to the
+ /// mapping from clang's file ids to coverage mapping file ids.
+ /// Return true if there was an error getting the coverage mapping file id.
+ /// An example of when this function fails is when the region tries to get
+ /// a coverage file id for a location inside a built-in macro.
+ bool getCoverageFileID(SourceLocation LocStart, FileID File,
+ FileID SpellingFile, unsigned &Result) {
+ auto Mapping = FileIDMapping.find(File);
+ if (Mapping != FileIDMapping.end()) {
+ Result = Mapping->second.CovMappingFileID;
+ return false;
+ }
+
+ auto Entry = SM.getFileEntryForID(SpellingFile);
+ if (!Entry)
+ return true;
+
+ Result = FileIDMapping.size();
+ FileIDMapping.insert(std::make_pair(File, FileInfo(Result, Entry)));
+ createFileExpansionRegion(LocStart, File);
+ return false;
+ }
+
+ /// \brief Get the coverage mapping file id that corresponds to the given
+ /// clang file id.
+ /// Return true if there was an error getting the coverage mapping file id.
+ bool getExistingCoverageFileID(FileID File, unsigned &Result) {
+ // Make sure that the file is valid.
+ if (File.isInvalid())
+ return true;
+ auto Mapping = FileIDMapping.find(File);
+ if (Mapping != FileIDMapping.end()) {
+ Result = Mapping->second.CovMappingFileID;
+ return false;
+ }
+ return true;
+ }
+
+ /// \brief Return true if the given clang's file id has a corresponding
+ /// coverage file id.
+ bool hasExistingCoverageFileID(FileID File) const {
+ return FileIDMapping.count(File);
+ }
+
+ /// \brief Gather all the regions that were skipped by the preprocessor
+ /// using constructs like #if.
+ void gatherSkippedRegions() {
+ /// An array of the minimum lineStarts and the maximum lineEnds
+ /// for mapping regions from the appropriate source files.
+ llvm::SmallVector<std::pair<unsigned, unsigned>, 8> FileLineRanges;
+ FileLineRanges.resize(
+ FileIDMapping.size(),
+ std::make_pair(std::numeric_limits<unsigned>::max(), 0));
+ for (const auto &R : MappingRegions) {
+ FileLineRanges[R.FileID].first =
+ std::min(FileLineRanges[R.FileID].first, R.LineStart);
+ FileLineRanges[R.FileID].second =
+ std::max(FileLineRanges[R.FileID].second, R.LineEnd);
+ }
+
+ auto SkippedRanges = CVM.getSourceInfo().getSkippedRanges();
+ for (const auto &I : SkippedRanges) {
+ auto LocStart = I.getBegin();
+ auto LocEnd = I.getEnd();
+ auto FileStart = SM.getFileID(LocStart);
+ if (!hasExistingCoverageFileID(FileStart))
+ continue;
+ auto ActualFileStart = SM.getDecomposedSpellingLoc(LocStart).first;
+ if (ActualFileStart != SM.getDecomposedSpellingLoc(LocEnd).first)
+ // Ignore regions that span across multiple files.
+ continue;
+
+ unsigned CovFileID;
+ if (getCoverageFileID(LocStart, FileStart, ActualFileStart, CovFileID))
+ continue;
+ unsigned LineStart = SM.getSpellingLineNumber(LocStart);
+ unsigned ColumnStart = SM.getSpellingColumnNumber(LocStart);
+ unsigned LineEnd = SM.getSpellingLineNumber(LocEnd);
+ unsigned ColumnEnd = SM.getSpellingColumnNumber(LocEnd);
+ CounterMappingRegion Region(Counter(), CovFileID, LineStart, ColumnStart,
+ LineEnd, ColumnEnd, false,
+ CounterMappingRegion::SkippedRegion);
+ // Make sure that we only collect the regions that are inside
+ // the source code of this function.
+ if (Region.LineStart >= FileLineRanges[CovFileID].first &&
+ Region.LineEnd <= FileLineRanges[CovFileID].second)
+ MappingRegions.push_back(Region);
+ }
+ }
+
+ /// \brief Create a mapping region that corresponds to an expansion of
+ /// a macro or an embedded include.
+ void createFileExpansionRegion(SourceLocation Loc, FileID ExpandedFile) {
+ SourceLocation LocStart;
+ if (Loc.isMacroID())
+ LocStart = SM.getImmediateExpansionRange(Loc).first;
+ else {
+ LocStart = SM.getIncludeLoc(ExpandedFile);
+ if (LocStart.isInvalid())
+ return; // This file has no expansion region.
+ }
+
+ auto File = SM.getFileID(LocStart);
+ auto SpellingFile = SM.getDecomposedSpellingLoc(LocStart).first;
+ unsigned CovFileID, ExpandedFileID;
+ if (getExistingCoverageFileID(ExpandedFile, ExpandedFileID))
+ return;
+ if (getCoverageFileID(LocStart, File, SpellingFile, CovFileID))
+ return;
+ unsigned LineStart = SM.getSpellingLineNumber(LocStart);
+ unsigned ColumnStart = SM.getSpellingColumnNumber(LocStart);
+ unsigned LineEnd = LineStart;
+ // Compute the end column manually as Lexer::getLocForEndOfToken doesn't
+ // give the correct result in all cases.
+ unsigned ColumnEnd =
+ ColumnStart +
+ Lexer::MeasureTokenLength(SM.getSpellingLoc(LocStart), SM, LangOpts);
+
+ MappingRegions.push_back(CounterMappingRegion(
+ Counter(), CovFileID, LineStart, ColumnStart, LineEnd, ColumnEnd,
+ false, CounterMappingRegion::ExpansionRegion));
+ MappingRegions.back().ExpandedFileID = ExpandedFileID;
+ }
+
+ /// \brief Enter a source region group that is identified by the given
+ /// statement.
+ /// It's not possible to enter a group when there is already
+ /// another group present.
+ void beginSourceRegionGroup(const Stmt *Group) {
+ assert(!CurrentSourceGroup);
+ CurrentSourceGroup = Group;
+ }
+
+ /// \brief Exit the current source region group.
+ void endSourceRegionGroup() { CurrentSourceGroup = nullptr; }
+
+ /// \brief Associate a counter with a given source code range.
+ void mapSourceCodeRange(SourceLocation LocStart, SourceLocation LocEnd,
+ Counter Count, const Stmt *UnreachableInitiator,
+ const Stmt *SourceGroup, unsigned Flags = 0,
+ FileID MacroArgumentFile = FileID()) {
+ if (SM.isMacroArgExpansion(LocStart)) {
+ // Map the code range with the macro argument's value.
+ mapSourceCodeRange(SM.getImmediateSpellingLoc(LocStart),
+ SM.getImmediateSpellingLoc(LocEnd), Count,
+ UnreachableInitiator, SourceGroup, Flags,
+ SM.getFileID(LocStart));
+ // Map the code range where the macro argument is referenced.
+ SourceLocation RefLocStart(SM.getImmediateExpansionRange(LocStart).first);
+ SourceLocation RefLocEnd(RefLocStart);
+ if (SM.isMacroArgExpansion(RefLocStart))
+ mapSourceCodeRange(RefLocStart, RefLocEnd, Count, UnreachableInitiator,
+ SourceGroup, 0, SM.getFileID(RefLocStart));
+ else
+ mapSourceCodeRange(RefLocStart, RefLocEnd, Count, UnreachableInitiator,
+ SourceGroup);
+ return;
+ }
+ auto File = SM.getFileID(LocStart);
+ // Make sure that the file id is valid.
+ if (File.isInvalid())
+ return;
+ SourceRegions.emplace_back(File, MacroArgumentFile, Count,
+ UnreachableInitiator, SourceGroup, LocStart,
+ LocEnd, Flags);
+ }
+
+ void mapSourceCodeRange(SourceLocation LocStart, SourceLocation LocEnd,
+ Counter Count, unsigned Flags = 0) {
+ mapSourceCodeRange(LocStart, LocEnd, Count,
+ CurrentUnreachableRegionInitiator, CurrentSourceGroup,
+ Flags);
+ }
+
+ /// \brief Generate the coverage counter mapping regions from collected
+ /// source regions.
+ void emitSourceRegions() {
+ std::sort(SourceRegions.begin(), SourceRegions.end());
+
+ for (auto I = SourceRegions.begin(), E = SourceRegions.end(); I != E; ++I) {
+ // Keep the original start location of this region.
+ SourceLocation LocStart = I->getStartLoc();
+ SourceLocation LocEnd = I->getEndLoc(SM);
+
+ bool Ignore = I->hasFlag(SourceMappingRegion::IgnoreIfNotExtended);
+ // We need to handle mergeable regions together.
+ for (auto Next = I + 1; Next != E && Next->isMergeable(*I); ++Next) {
+ ++I;
+ LocStart = std::min(LocStart, I->getStartLoc());
+ LocEnd = std::max(LocEnd, I->getEndLoc(SM));
+ // FIXME: Should we && together the Ignore flag of multiple regions?
+ Ignore = false;
+ }
+ if (Ignore)
+ continue;
+
+ // Find the spelling locations for the mapping region.
+ LocEnd = getPreciseTokenLocEnd(LocEnd);
+ unsigned LineStart = SM.getSpellingLineNumber(LocStart);
+ unsigned ColumnStart = SM.getSpellingColumnNumber(LocStart);
+ unsigned LineEnd = SM.getSpellingLineNumber(LocEnd);
+ unsigned ColumnEnd = SM.getSpellingColumnNumber(LocEnd);
+
+ auto SpellingFile = SM.getDecomposedSpellingLoc(LocStart).first;
+ unsigned CovFileID;
+ if (getCoverageFileID(LocStart, I->getFile(), SpellingFile, CovFileID))
+ continue;
+
+ assert(LineStart <= LineEnd);
+ MappingRegions.push_back(CounterMappingRegion(
+ I->getCounter(), CovFileID, LineStart, ColumnStart, LineEnd,
+ ColumnEnd, false, CounterMappingRegion::CodeRegion));
+ }
+ }
+};
+
+/// \brief Creates unreachable coverage regions for the functions that
+/// are not emitted.
+struct EmptyCoverageMappingBuilder : public CoverageMappingBuilder {
+ EmptyCoverageMappingBuilder(CoverageMappingModuleGen &CVM, SourceManager &SM,
+ const LangOptions &LangOpts)
+ : CoverageMappingBuilder(CVM, SM, LangOpts) {}
+
+ void VisitDecl(const Decl *D) {
+ if (!D->hasBody())
+ return;
+ auto Body = D->getBody();
+ mapSourceCodeRange(Body->getLocStart(), Body->getLocEnd(), Counter());
+ }
+
+ /// \brief Write the mapping data to the output stream
+ void write(llvm::raw_ostream &OS) {
+ emitSourceRegions();
+ SmallVector<unsigned, 16> FileIDMapping;
+ createFileIDMapping(FileIDMapping);
+
+ CoverageMappingWriter Writer(FileIDMapping, None, MappingRegions);
+ Writer.write(OS);
+ }
+};
+
+/// \brief A StmtVisitor that creates coverage mapping regions which map
+/// from the source code locations to the PGO counters.
+struct CounterCoverageMappingBuilder
+ : public CoverageMappingBuilder,
+ public ConstStmtVisitor<CounterCoverageMappingBuilder> {
+ /// \brief The map of statements to count values.
+ llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
+
+ Counter CurrentRegionCount;
+
+ CounterExpressionBuilder Builder;
+
+ /// \brief Return a counter that represents the
+ /// expression that subtracts RHS from LHS.
+ Counter subtractCounters(Counter LHS, Counter RHS) {
+ return Builder.subtract(LHS, RHS);
+ }
+
+ /// \brief Return a counter that represents the
+ /// expression that adds LHS and RHS.
+ Counter addCounters(Counter LHS, Counter RHS) {
+ return Builder.add(LHS, RHS);
+ }
+
+ /// \brief Return the region counter for the given statement.
+ /// This should only be called on statements that have a dedicated counter.
+ unsigned getRegionCounter(const Stmt *S) { return CounterMap[S]; }
+
+ /// \brief Return the region count for the counter at the given index.
+ Counter getRegionCount(unsigned CounterId) {
+ return Counter::getCounter(CounterId);
+ }
+
+ /// \brief Return the counter value of the current region.
+ Counter getCurrentRegionCount() { return CurrentRegionCount; }
+
+ /// \brief Set the counter value for the current region.
+ /// This is used to keep track of changes to the most recent counter
+ /// from control flow and non-local exits.
+ void setCurrentRegionCount(Counter Count) {
+ CurrentRegionCount = Count;
+ CurrentUnreachableRegionInitiator = nullptr;
+ }
+
+ /// \brief Indicate that the current region is never reached,
+ /// and thus should have a counter value of zero.
+ /// This is important so that subsequent regions can correctly track
+ /// their parent counts.
+ void setCurrentRegionUnreachable(const Stmt *Initiator) {
+ CurrentRegionCount = Counter::getZero();
+ CurrentUnreachableRegionInitiator = Initiator;
+ }
+
+ /// \brief A counter for a particular region.
+ /// This is the primary interface through
+ /// which the coverage mapping builder manages counters and their values.
+ class RegionMapper {
+ CounterCoverageMappingBuilder &Mapping;
+ Counter Count;
+ Counter ParentCount;
+ Counter RegionCount;
+ Counter Adjust;
+
+ public:
+ RegionMapper(CounterCoverageMappingBuilder *Mapper, const Stmt *S)
+ : Mapping(*Mapper),
+ Count(Mapper->getRegionCount(Mapper->getRegionCounter(S))),
+ ParentCount(Mapper->getCurrentRegionCount()) {}
+
+ /// Get the value of the counter. In most cases this is the number of times
+ /// the region of the counter was entered, but for switch labels it's the
+ /// number of direct jumps to that label.
+ Counter getCount() const { return Count; }
+
+ /// Get the value of the counter with adjustments applied. Adjustments occur
+ /// when control enters or leaves the region abnormally; i.e., if there is a
+ /// jump to a label within the region, or if the function can return from
+ /// within the region. The adjusted count, then, is the value of the counter
+ /// at the end of the region.
+ Counter getAdjustedCount() const {
+ return Mapping.addCounters(Count, Adjust);
+ }
+
+ /// Get the value of the counter in this region's parent, i.e., the region
+ /// that was active when this region began. This is useful for deriving
+ /// counts in implicitly counted regions, like the false case of a condition
+ /// or the normal exits of a loop.
+ Counter getParentCount() const { return ParentCount; }
+
+ /// Activate the counter by emitting an increment and starting to track
+ /// adjustments. If AddIncomingFallThrough is true, the current region count
+ /// will be added to the counter for the purposes of tracking the region.
+ void beginRegion(bool AddIncomingFallThrough = false) {
+ RegionCount = Count;
+ if (AddIncomingFallThrough)
+ RegionCount =
+ Mapping.addCounters(RegionCount, Mapping.getCurrentRegionCount());
+ Mapping.setCurrentRegionCount(RegionCount);
+ }
+
+ /// For counters on boolean branches, begins tracking adjustments for the
+ /// uncounted path.
+ void beginElseRegion() {
+ RegionCount = Mapping.subtractCounters(ParentCount, Count);
+ Mapping.setCurrentRegionCount(RegionCount);
+ }
+
+ /// Reset the current region count.
+ void setCurrentRegionCount(Counter CurrentCount) {
+ RegionCount = CurrentCount;
+ Mapping.setCurrentRegionCount(RegionCount);
+ }
+
+ /// Adjust for non-local control flow after emitting a subexpression or
+ /// substatement. This must be called to account for constructs such as
+ /// gotos, labels, and returns, so that we can ensure that our region's
+ /// count is correct in the code that follows.
+ void adjustForControlFlow() {
+ Adjust = Mapping.addCounters(
+ Adjust, Mapping.subtractCounters(Mapping.getCurrentRegionCount(),
+ RegionCount));
+ // Reset the region count in case this is called again later.
+ RegionCount = Mapping.getCurrentRegionCount();
+ }
+
+ /// Commit all adjustments to the current region. If the region is a loop,
+ /// the LoopAdjust value should be the count of all the breaks and continues
+ /// from the loop, to compensate for those counts being deducted from the
+ /// adjustments for the body of the loop.
+ void applyAdjustmentsToRegion() {
+ Mapping.setCurrentRegionCount(Mapping.addCounters(ParentCount, Adjust));
+ }
+ void applyAdjustmentsToRegion(Counter LoopAdjust) {
+ Mapping.setCurrentRegionCount(Mapping.addCounters(
+ Mapping.addCounters(ParentCount, Adjust), LoopAdjust));
+ }
+ };
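
To make the Count/ParentCount/Adjust bookkeeping of RegionMapper concrete, the following stand-alone sketch works through the same algebra with plain integers for an if/else whose 'then' branch always returns. This illustrates the arithmetic only; the real class manipulates symbolic Counter expressions rather than integers:

#include <cassert>

int main() {
  // Illustration of the RegionMapper arithmetic with assumed integer counts.
  int Parent = 10; // times the enclosing region was entered
  int Count = 7;   // counter attached to the 'then' branch
  int Adjust = 0;  // accumulated non-local control-flow adjustment

  // beginRegion(): the current count becomes the 'then' counter.
  int Current = Count;

  // The 'then' branch ends in a return, so the region becomes unreachable.
  Current = 0;
  // adjustForControlFlow(): record how the count changed while visiting.
  Adjust += Current - Count; // now -7

  // beginElseRegion(): the 'else' count is Parent - Count.
  Current = Parent - Count; // 3
  int ElseStart = Current;
  // No abnormal exits in the 'else' branch, so nothing changes here.
  Adjust += Current - ElseStart; // still -7

  // applyAdjustmentsToRegion(): the count after the whole 'if' statement.
  Current = Parent + Adjust;
  assert(Current == 3); // only the 'else' path falls through
  return 0;
}
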
+
+ /// \brief Keep counts of breaks and continues inside loops.
+ struct BreakContinue {
+ Counter BreakCount;
+ Counter ContinueCount;
+ };
+ SmallVector<BreakContinue, 8> BreakContinueStack;
+
+ CounterCoverageMappingBuilder(
+ CoverageMappingModuleGen &CVM,
+ llvm::DenseMap<const Stmt *, unsigned> &CounterMap, SourceManager &SM,
+ const LangOptions &LangOpts)
+ : CoverageMappingBuilder(CVM, SM, LangOpts), CounterMap(CounterMap) {}
+
+ /// \brief Write the mapping data to the output stream
+ void write(llvm::raw_ostream &OS) {
+ emitSourceRegions();
+ llvm::SmallVector<unsigned, 8> VirtualFileMapping;
+ createFileIDMapping(VirtualFileMapping);
+ gatherSkippedRegions();
+
+ CoverageMappingWriter Writer(
+ VirtualFileMapping, Builder.getExpressions(), MappingRegions);
+ Writer.write(OS);
+ }
+
+ /// \brief Associate the source code range with the current region count.
+ void mapSourceCodeRange(SourceLocation LocStart, SourceLocation LocEnd,
+ unsigned Flags = 0) {
+ CoverageMappingBuilder::mapSourceCodeRange(LocStart, LocEnd,
+ CurrentRegionCount, Flags);
+ }
+
+ void mapSourceCodeRange(SourceLocation LocStart) {
+ CoverageMappingBuilder::mapSourceCodeRange(LocStart, LocStart,
+ CurrentRegionCount);
+ }
+
+ /// \brief Associate the source range of a token with the current region
+ /// count.
+ /// Ignore the source range for this token if it produces a distinct
+ /// mapping region with no other source ranges.
+ void mapToken(SourceLocation LocStart) {
+ CoverageMappingBuilder::mapSourceCodeRange(
+ LocStart, LocStart, CurrentRegionCount,
+ SourceMappingRegion::IgnoreIfNotExtended);
+ }
+
+ void VisitStmt(const Stmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ for (Stmt::const_child_range I = S->children(); I; ++I) {
+ if (*I)
+ this->Visit(*I);
+ }
+ }
+
+ void VisitDecl(const Decl *D) {
+ if (!D->hasBody())
+ return;
+ // Counter tracks entry to the function body.
+ auto Body = D->getBody();
+ RegionMapper Cnt(this, Body);
+ Cnt.beginRegion();
+ Visit(Body);
+ }
+
+ void VisitDeclStmt(const DeclStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ for (Stmt::const_child_range I = static_cast<const Stmt *>(S)->children();
+ I; ++I) {
+ if (*I)
+ this->Visit(*I);
+ }
+ }
+
+ void VisitCompoundStmt(const CompoundStmt *S) {
+ mapSourceCodeRange(S->getLBracLoc());
+ mapSourceCodeRange(S->getRBracLoc());
+ for (Stmt::const_child_range I = S->children(); I; ++I) {
+ if (*I)
+ this->Visit(*I);
+ }
+ }
+
+ void VisitReturnStmt(const ReturnStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ if (S->getRetValue())
+ Visit(S->getRetValue());
+ setCurrentRegionUnreachable(S);
+ }
+
+ void VisitGotoStmt(const GotoStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ mapToken(S->getLabelLoc());
+ setCurrentRegionUnreachable(S);
+ }
+
+ void VisitLabelStmt(const LabelStmt *S) {
+ // Counter tracks the block following the label.
+ RegionMapper Cnt(this, S);
+ Cnt.beginRegion();
+ mapSourceCodeRange(S->getLocStart());
+ // Can't map the ':' token as its location isn't known.
+ Visit(S->getSubStmt());
+ }
+
+ void VisitBreakStmt(const BreakStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ assert(!BreakContinueStack.empty() && "break not in a loop or switch!");
+ BreakContinueStack.back().BreakCount = addCounters(
+ BreakContinueStack.back().BreakCount, getCurrentRegionCount());
+ setCurrentRegionUnreachable(S);
+ }
+
+ void VisitContinueStmt(const ContinueStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+ BreakContinueStack.back().ContinueCount = addCounters(
+ BreakContinueStack.back().ContinueCount, getCurrentRegionCount());
+ setCurrentRegionUnreachable(S);
+ }
+
+ void VisitWhileStmt(const WhileStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ // Counter tracks the body of the loop.
+ RegionMapper Cnt(this, S);
+ BreakContinueStack.push_back(BreakContinue());
+ // Visit the body region first so the break/continue adjustments can be
+ // included when visiting the condition.
+ Cnt.beginRegion();
+ Visit(S->getBody());
+ Cnt.adjustForControlFlow();
+
+ // ...then go back and propagate counts through the condition. The count
+ // at the start of the condition is the sum of the incoming edges,
+ // the backedge from the end of the loop body, and the edges from
+ // continue statements.
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+ Cnt.setCurrentRegionCount(
+ addCounters(Cnt.getParentCount(),
+ addCounters(Cnt.getAdjustedCount(), BC.ContinueCount)));
+ beginSourceRegionGroup(S->getCond());
+ Visit(S->getCond());
+ endSourceRegionGroup();
+ Cnt.adjustForControlFlow();
+ Cnt.applyAdjustmentsToRegion(addCounters(BC.BreakCount, BC.ContinueCount));
+ }
+
+ void VisitDoStmt(const DoStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ // Counter tracks the body of the loop.
+ RegionMapper Cnt(this, S);
+ BreakContinueStack.push_back(BreakContinue());
+ Cnt.beginRegion(/*AddIncomingFallThrough=*/true);
+ Visit(S->getBody());
+ Cnt.adjustForControlFlow();
+
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+ // The count at the start of the condition is equal to the count at the
+ // end of the body. The adjusted count does not include either the
+ // fall-through count coming into the loop or the continue count, so add
+ // both of those separately. This is coincidentally the same equation as
+ // with while loops but for different reasons.
+ Cnt.setCurrentRegionCount(
+ addCounters(Cnt.getParentCount(),
+ addCounters(Cnt.getAdjustedCount(), BC.ContinueCount)));
+ Visit(S->getCond());
+ Cnt.adjustForControlFlow();
+ Cnt.applyAdjustmentsToRegion(addCounters(BC.BreakCount, BC.ContinueCount));
+ }
+
+ void VisitForStmt(const ForStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ if (S->getInit())
+ Visit(S->getInit());
+
+ // Counter tracks the body of the loop.
+ RegionMapper Cnt(this, S);
+ BreakContinueStack.push_back(BreakContinue());
+ // Visit the body region first. (This is basically the same as a while
+ // loop; see further comments in VisitWhileStmt.)
+ Cnt.beginRegion();
+ Visit(S->getBody());
+ Cnt.adjustForControlFlow();
+
+ // The increment is essentially part of the body but it needs to include
+ // the count for all the continue statements.
+ if (S->getInc()) {
+ Cnt.setCurrentRegionCount(addCounters(
+ getCurrentRegionCount(), BreakContinueStack.back().ContinueCount));
+ beginSourceRegionGroup(S->getInc());
+ Visit(S->getInc());
+ endSourceRegionGroup();
+ Cnt.adjustForControlFlow();
+ }
+
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+
+ // ...then go back and propagate counts through the condition.
+ if (S->getCond()) {
+ Cnt.setCurrentRegionCount(
+ addCounters(addCounters(Cnt.getParentCount(), Cnt.getAdjustedCount()),
+ BC.ContinueCount));
+ beginSourceRegionGroup(S->getCond());
+ Visit(S->getCond());
+ endSourceRegionGroup();
+ Cnt.adjustForControlFlow();
+ }
+ Cnt.applyAdjustmentsToRegion(addCounters(BC.BreakCount, BC.ContinueCount));
+ }
+
+ void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ Visit(S->getRangeStmt());
+ Visit(S->getBeginEndStmt());
+ // Counter tracks the body of the loop.
+ RegionMapper Cnt(this, S);
+ BreakContinueStack.push_back(BreakContinue());
+ // Visit the body region first. (This is basically the same as a while
+ // loop; see further comments in VisitWhileStmt.)
+ Cnt.beginRegion();
+ Visit(S->getBody());
+ Cnt.adjustForControlFlow();
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+ Cnt.applyAdjustmentsToRegion(addCounters(BC.BreakCount, BC.ContinueCount));
+ }
+
+ void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ Visit(S->getElement());
+ // Counter tracks the body of the loop.
+ RegionMapper Cnt(this, S);
+ BreakContinueStack.push_back(BreakContinue());
+ Cnt.beginRegion();
+ Visit(S->getBody());
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+ Cnt.adjustForControlFlow();
+ Cnt.applyAdjustmentsToRegion(addCounters(BC.BreakCount, BC.ContinueCount));
+ }
+
+ void VisitSwitchStmt(const SwitchStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ Visit(S->getCond());
+ BreakContinueStack.push_back(BreakContinue());
+ // Map the '}' for the body to have the same count as the regions after
+ // the switch.
+ SourceLocation RBracLoc;
+ if (const auto *CS = dyn_cast<CompoundStmt>(S->getBody())) {
+ mapSourceCodeRange(CS->getLBracLoc());
+ setCurrentRegionUnreachable(S);
+ for (Stmt::const_child_range I = CS->children(); I; ++I) {
+ if (*I)
+ this->Visit(*I);
+ }
+ RBracLoc = CS->getRBracLoc();
+ } else {
+ setCurrentRegionUnreachable(S);
+ Visit(S->getBody());
+ }
+ // If the switch is inside a loop, add the continue counts.
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+ if (!BreakContinueStack.empty())
+ BreakContinueStack.back().ContinueCount = addCounters(
+ BreakContinueStack.back().ContinueCount, BC.ContinueCount);
+ // Counter tracks the exit block of the switch.
+ RegionMapper ExitCnt(this, S);
+ ExitCnt.beginRegion();
+ if (RBracLoc.isValid())
+ mapSourceCodeRange(RBracLoc);
+ }
+
+ void VisitCaseStmt(const CaseStmt *S) {
+ // Counter for this particular case. This counts only jumps from the
+ // switch header and does not include fallthrough from the case before
+ // this one.
+ RegionMapper Cnt(this, S);
+ Cnt.beginRegion(/*AddIncomingFallThrough=*/true);
+ mapSourceCodeRange(S->getLocStart());
+ mapToken(S->getColonLoc());
+ Visit(S->getSubStmt());
+ }
+
+ void VisitDefaultStmt(const DefaultStmt *S) {
+ // Counter for this default case. This does not include fallthrough from
+ // the previous case.
+ RegionMapper Cnt(this, S);
+ Cnt.beginRegion(/*AddIncomingFallThrough=*/true);
+ mapSourceCodeRange(S->getLocStart());
+ mapToken(S->getColonLoc());
+ Visit(S->getSubStmt());
+ }
+
+ void VisitIfStmt(const IfStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ Visit(S->getCond());
+ mapToken(S->getElseLoc());
+
+ // Counter tracks the "then" part of an if statement. The count for
+ // the "else" part, if it exists, will be calculated from this counter.
+ RegionMapper Cnt(this, S);
+ Cnt.beginRegion();
+ Visit(S->getThen());
+ Cnt.adjustForControlFlow();
+
+ if (S->getElse()) {
+ Cnt.beginElseRegion();
+ Visit(S->getElse());
+ Cnt.adjustForControlFlow();
+ }
+ Cnt.applyAdjustmentsToRegion();
+ }
+
+ void VisitCXXTryStmt(const CXXTryStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ Visit(S->getTryBlock());
+ for (unsigned I = 0, E = S->getNumHandlers(); I < E; ++I)
+ Visit(S->getHandler(I));
+ // Counter tracks the continuation block of the try statement.
+ RegionMapper Cnt(this, S);
+ Cnt.beginRegion();
+ }
+
+ void VisitCXXCatchStmt(const CXXCatchStmt *S) {
+ mapSourceCodeRange(S->getLocStart());
+ // Counter tracks the catch statement's handler block.
+ RegionMapper Cnt(this, S);
+ Cnt.beginRegion();
+ Visit(S->getHandlerBlock());
+ }
+
+ void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ Visit(E->getCond());
+ mapToken(E->getQuestionLoc());
+ mapToken(E->getColonLoc());
+
+ // Counter tracks the "true" part of a conditional operator. The
+ // count in the "false" part will be calculated from this counter.
+ RegionMapper Cnt(this, E);
+ Cnt.beginRegion();
+ Visit(E->getTrueExpr());
+ Cnt.adjustForControlFlow();
+
+ Cnt.beginElseRegion();
+ Visit(E->getFalseExpr());
+ Cnt.adjustForControlFlow();
+
+ Cnt.applyAdjustmentsToRegion();
+ }
+
+ void VisitBinLAnd(const BinaryOperator *E) {
+ Visit(E->getLHS());
+ mapToken(E->getOperatorLoc());
+ // Counter tracks the right hand side of a logical and operator.
+ RegionMapper Cnt(this, E);
+ Cnt.beginRegion();
+ Visit(E->getRHS());
+ Cnt.adjustForControlFlow();
+ Cnt.applyAdjustmentsToRegion();
+ }
+
+ void VisitBinLOr(const BinaryOperator *E) {
+ Visit(E->getLHS());
+ mapToken(E->getOperatorLoc());
+ // Counter tracks the right hand side of a logical or operator.
+ RegionMapper Cnt(this, E);
+ Cnt.beginRegion();
+ Visit(E->getRHS());
+ Cnt.adjustForControlFlow();
+ Cnt.applyAdjustmentsToRegion();
+ }
+
+ void VisitParenExpr(const ParenExpr *E) {
+ mapToken(E->getLParen());
+ Visit(E->getSubExpr());
+ mapToken(E->getRParen());
+ }
+
+ void VisitBinaryOperator(const BinaryOperator *E) {
+ Visit(E->getLHS());
+ mapToken(E->getOperatorLoc());
+ Visit(E->getRHS());
+ }
+
+ void VisitUnaryOperator(const UnaryOperator *E) {
+ bool Postfix = E->isPostfix();
+ if (!Postfix)
+ mapToken(E->getOperatorLoc());
+ Visit(E->getSubExpr());
+ if (Postfix)
+ mapToken(E->getOperatorLoc());
+ }
+
+ void VisitMemberExpr(const MemberExpr *E) {
+ Visit(E->getBase());
+ mapToken(E->getMemberLoc());
+ }
+
+ void VisitCallExpr(const CallExpr *E) {
+ Visit(E->getCallee());
+ for (const auto &Arg : E->arguments())
+ Visit(Arg);
+ mapToken(E->getRParenLoc());
+ }
+
+ void VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ Visit(E->getLHS());
+ Visit(E->getRHS());
+ mapToken(E->getRBracketLoc());
+ }
+
+ void VisitCStyleCastExpr(const CStyleCastExpr *E) {
+ mapToken(E->getLParenLoc());
+ mapToken(E->getRParenLoc());
+ Visit(E->getSubExpr());
+ }
+
+ // Map literals as tokens so that macros like '#define PI 3.14' won't
+ // generate coverage mapping regions.
+
+ void VisitIntegerLiteral(const IntegerLiteral *E) {
+ mapToken(E->getLocStart());
+ }
+
+ void VisitFloatingLiteral(const FloatingLiteral *E) {
+ mapToken(E->getLocStart());
+ }
+
+ void VisitCharacterLiteral(const CharacterLiteral *E) {
+ mapToken(E->getLocStart());
+ }
+
+ void VisitStringLiteral(const StringLiteral *E) {
+ mapToken(E->getLocStart());
+ }
+
+ void VisitImaginaryLiteral(const ImaginaryLiteral *E) {
+ mapToken(E->getLocStart());
+ }
+
+ void VisitObjCMessageExpr(const ObjCMessageExpr *E) {
+ mapToken(E->getLeftLoc());
+ for (Stmt::const_child_range I = static_cast<const Stmt*>(E)->children(); I;
+ ++I) {
+ if (*I)
+ this->Visit(*I);
+ }
+ mapToken(E->getRightLoc());
+ }
+};
+}
+
+static bool isMachO(const CodeGenModule &CGM) {
+ return CGM.getTarget().getTriple().isOSBinFormatMachO();
+}
+
+static StringRef getCoverageSection(const CodeGenModule &CGM) {
+ return isMachO(CGM) ? "__DATA,__llvm_covmap" : "__llvm_covmap";
+}
+
+static void dump(llvm::raw_ostream &OS, const CoverageMappingRecord &Function) {
+ OS << Function.FunctionName << ":\n";
+ CounterMappingContext Ctx(Function.Expressions);
+ for (const auto &R : Function.MappingRegions) {
+ OS.indent(2);
+ switch (R.Kind) {
+ case CounterMappingRegion::CodeRegion:
+ break;
+ case CounterMappingRegion::ExpansionRegion:
+ OS << "Expansion,";
+ break;
+ case CounterMappingRegion::SkippedRegion:
+ OS << "Skipped,";
+ break;
+ }
+
+ OS << "File " << R.FileID << ", " << R.LineStart << ":"
+ << R.ColumnStart << " -> " << R.LineEnd << ":" << R.ColumnEnd
+ << " = ";
+ Ctx.dump(R.Count);
+ OS << " (HasCodeBefore = " << R.HasCodeBefore;
+ if (R.Kind == CounterMappingRegion::ExpansionRegion)
+ OS << ", Expanded file = " << R.ExpandedFileID;
+
+ OS << ")\n";
+ }
+}
+
+void CoverageMappingModuleGen::addFunctionMappingRecord(
+ llvm::GlobalVariable *FunctionName, StringRef FunctionNameValue,
+ uint64_t FunctionHash, const std::string &CoverageMapping) {
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+ auto *Int32Ty = llvm::Type::getInt32Ty(Ctx);
+ auto *Int64Ty = llvm::Type::getInt64Ty(Ctx);
+ auto *Int8PtrTy = llvm::Type::getInt8PtrTy(Ctx);
+ if (!FunctionRecordTy) {
+ llvm::Type *FunctionRecordTypes[] = {Int8PtrTy, Int32Ty, Int32Ty, Int64Ty};
+ FunctionRecordTy =
+ llvm::StructType::get(Ctx, makeArrayRef(FunctionRecordTypes));
+ }
+
+ llvm::Constant *FunctionRecordVals[] = {
+ llvm::ConstantExpr::getBitCast(FunctionName, Int8PtrTy),
+ llvm::ConstantInt::get(Int32Ty, FunctionNameValue.size()),
+ llvm::ConstantInt::get(Int32Ty, CoverageMapping.size()),
+ llvm::ConstantInt::get(Int64Ty, FunctionHash)};
+ FunctionRecords.push_back(llvm::ConstantStruct::get(
+ FunctionRecordTy, makeArrayRef(FunctionRecordVals)));
+ CoverageMappings += CoverageMapping;
+
+ if (CGM.getCodeGenOpts().DumpCoverageMapping) {
+ // Dump the coverage mapping data for this function by decoding the
+ // encoded data. This allows us to dump the mapping regions that were
+ // also processed by the CoverageMappingWriter, which performs additional
+ // minimization operations such as reducing the number of expressions.
+ std::vector<StringRef> Filenames;
+ std::vector<CounterExpression> Expressions;
+ std::vector<CounterMappingRegion> Regions;
+ llvm::SmallVector<StringRef, 16> FilenameRefs;
+ FilenameRefs.resize(FileEntries.size());
+ for (const auto &Entry : FileEntries)
+ FilenameRefs[Entry.second] = Entry.first->getName();
+ RawCoverageMappingReader Reader(FunctionNameValue, CoverageMapping,
+ FilenameRefs,
+ Filenames, Expressions, Regions);
+ CoverageMappingRecord FunctionRecord;
+ if (Reader.read(FunctionRecord))
+ return;
+ dump(llvm::outs(), FunctionRecord);
+ }
+}
+
+void CoverageMappingModuleGen::emit() {
+ if (FunctionRecords.empty())
+ return;
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+ auto *Int32Ty = llvm::Type::getInt32Ty(Ctx);
+
+ // Create the filenames and merge them with coverage mappings
+ llvm::SmallVector<std::string, 16> FilenameStrs;
+ llvm::SmallVector<StringRef, 16> FilenameRefs;
+ FilenameStrs.resize(FileEntries.size());
+ FilenameRefs.resize(FileEntries.size());
+ for (const auto &Entry : FileEntries) {
+ llvm::SmallString<256> Path(Entry.first->getName());
+ llvm::sys::fs::make_absolute(Path);
+
+ auto I = Entry.second;
+ FilenameStrs[I] = std::string(Path.begin(), Path.end());
+ FilenameRefs[I] = FilenameStrs[I];
+ }
+
+ std::string FilenamesAndCoverageMappings;
+ llvm::raw_string_ostream OS(FilenamesAndCoverageMappings);
+ CoverageFilenamesSectionWriter(FilenameRefs).write(OS);
+ OS << CoverageMappings;
+ size_t CoverageMappingSize = CoverageMappings.size();
+ size_t FilenamesSize = OS.str().size() - CoverageMappingSize;
+ // Append extra zeroes if necessary to ensure that the size of the filenames
+ // and coverage mappings is a multiple of 8.
+ if (size_t Rem = OS.str().size() % 8) {
+ CoverageMappingSize += 8 - Rem;
+ for (size_t I = 0, S = 8 - Rem; I < S; ++I)
+ OS << '\0';
+ }
+ auto *FilenamesAndMappingsVal =
+ llvm::ConstantDataArray::getString(Ctx, OS.str(), false);
+
+ // Create the deferred function records array
+ auto RecordsTy =
+ llvm::ArrayType::get(FunctionRecordTy, FunctionRecords.size());
+ auto RecordsVal = llvm::ConstantArray::get(RecordsTy, FunctionRecords);
+
+ // Create the coverage data record
+ llvm::Type *CovDataTypes[] = {Int32Ty, Int32Ty,
+ Int32Ty, Int32Ty,
+ RecordsTy, FilenamesAndMappingsVal->getType()};
+ auto CovDataTy = llvm::StructType::get(Ctx, makeArrayRef(CovDataTypes));
+ llvm::Constant *TUDataVals[] = {
+ llvm::ConstantInt::get(Int32Ty, FunctionRecords.size()),
+ llvm::ConstantInt::get(Int32Ty, FilenamesSize),
+ llvm::ConstantInt::get(Int32Ty, CoverageMappingSize),
+ llvm::ConstantInt::get(Int32Ty,
+ /*Version=*/CoverageMappingVersion1),
+ RecordsVal, FilenamesAndMappingsVal};
+ auto CovDataVal =
+ llvm::ConstantStruct::get(CovDataTy, makeArrayRef(TUDataVals));
+ auto CovData = new llvm::GlobalVariable(CGM.getModule(), CovDataTy, true,
+ llvm::GlobalValue::InternalLinkage,
+ CovDataVal,
+ "__llvm_coverage_mapping");
+
+ CovData->setSection(getCoverageSection(CGM));
+ CovData->setAlignment(8);
+
+ // Make sure the data doesn't get deleted.
+ CGM.addUsedGlobal(CovData);
+}
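
A small worked example of the padding arithmetic in emit() above, under assumed sizes (the numbers are invented for illustration): with 13 bytes of filenames and 30 bytes of encoded mappings the blob is 43 bytes, 43 % 8 == 3, so five zero bytes are appended and the padding is charged to CoverageMappingSize. The same computation in isolation:

#include <cstddef>
#include <cstdio>

int main() {
  std::size_t FilenamesSize = 13, CoverageMappingSize = 30; // assumed sizes
  std::size_t Total = FilenamesSize + CoverageMappingSize;  // 43 bytes
  if (std::size_t Rem = Total % 8) {                        // 43 % 8 == 3
    CoverageMappingSize += 8 - Rem; // padding is charged to the mappings
    Total += 8 - Rem;               // 48 bytes, now a multiple of 8
  }
  std::printf("filenames=%zu mappings=%zu total=%zu\n", FilenamesSize,
              CoverageMappingSize, Total);
  return 0;
}
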
+
+unsigned CoverageMappingModuleGen::getFileID(const FileEntry *File) {
+ auto It = FileEntries.find(File);
+ if (It != FileEntries.end())
+ return It->second;
+ unsigned FileID = FileEntries.size();
+ FileEntries.insert(std::make_pair(File, FileID));
+ return FileID;
+}
+
+void CoverageMappingGen::emitCounterMapping(const Decl *D,
+ llvm::raw_ostream &OS) {
+ assert(CounterMap);
+ CounterCoverageMappingBuilder Walker(CVM, *CounterMap, SM, LangOpts);
+ Walker.VisitDecl(D);
+ Walker.write(OS);
+}
+
+void CoverageMappingGen::emitEmptyMapping(const Decl *D,
+ llvm::raw_ostream &OS) {
+ EmptyCoverageMappingBuilder Walker(CVM, SM, LangOpts);
+ Walker.VisitDecl(D);
+ Walker.write(OS);
+}
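
Taken together, addFunctionMappingRecord() and emit() above lay the coverage data out roughly as in the following C-style sketch. The struct and field names are invented here purely to visualize the layout; the authoritative description is the IR constant construction in emit() itself:

#include <cstdint>

// Per-function record, mirroring FunctionRecordTy:
//   { i8* NamePtr, i32 NameSize, i32 CoverageMappingSize, i64 FuncHash }
struct FunctionRecordLayout {
  const char *NamePtr;        // points at the PGO function name data
  std::uint32_t NameSize;     // length of the function name
  std::uint32_t MappingSize;  // length of this function's encoded mapping
  std::uint64_t FuncHash;     // structural hash of the function
};

// Whole-translation-unit record stored in the __llvm_covmap section:
//   { i32 NumRecords, i32 FilenamesSize, i32 CoverageMappingsSize,
//     i32 Version, FunctionRecordLayout Records[NumRecords],
//     char FilenamesAndMappings[] /* zero-padded to a multiple of 8 */ }
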
diff --git a/lib/CodeGen/CoverageMappingGen.h b/lib/CodeGen/CoverageMappingGen.h
new file mode 100644
index 000000000000..0d1bf6d975c9
--- /dev/null
+++ b/lib/CodeGen/CoverageMappingGen.h
@@ -0,0 +1,114 @@
+//===---- CoverageMappingGen.h - Coverage mapping generation ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Instrumentation-based code coverage mapping generator
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_COVERAGEMAPPINGGEN_H
+#define LLVM_CLANG_LIB_CODEGEN_COVERAGEMAPPINGGEN_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+
+class LangOptions;
+class SourceManager;
+class FileEntry;
+class Preprocessor;
+class Decl;
+class Stmt;
+
+/// \brief Stores additional source code information like skipped ranges which
+/// is required by the coverage mapping generator and is obtained from
+/// the preprocessor.
+class CoverageSourceInfo : public PPCallbacks {
+ std::vector<SourceRange> SkippedRanges;
+public:
+ ArrayRef<SourceRange> getSkippedRanges() const { return SkippedRanges; }
+
+ void SourceRangeSkipped(SourceRange Range) override;
+};
+
+namespace CodeGen {
+
+class CodeGenModule;
+
+/// \brief Organizes the cross-function state that is used while generating
+/// code coverage mapping data.
+class CoverageMappingModuleGen {
+ CodeGenModule &CGM;
+ CoverageSourceInfo &SourceInfo;
+ llvm::SmallDenseMap<const FileEntry *, unsigned, 8> FileEntries;
+ std::vector<llvm::Constant *> FunctionRecords;
+ llvm::StructType *FunctionRecordTy;
+ std::string CoverageMappings;
+
+public:
+ CoverageMappingModuleGen(CodeGenModule &CGM, CoverageSourceInfo &SourceInfo)
+ : CGM(CGM), SourceInfo(SourceInfo), FunctionRecordTy(nullptr) {}
+
+ CoverageSourceInfo &getSourceInfo() const {
+ return SourceInfo;
+ }
+
+ /// \brief Add a function's coverage mapping record to the collection of the
+ /// function mapping records.
+ void addFunctionMappingRecord(llvm::GlobalVariable *FunctionName,
+ StringRef FunctionNameValue,
+ uint64_t FunctionHash,
+ const std::string &CoverageMapping);
+
+ /// \brief Emit the coverage mapping data for a translation unit.
+ void emit();
+
+ /// \brief Return the coverage mapping translation unit file id
+ /// for the given file.
+ unsigned getFileID(const FileEntry *File);
+};
+
+/// \brief Organizes the per-function state that is used while generating
+/// code coverage mapping data.
+class CoverageMappingGen {
+ CoverageMappingModuleGen &CVM;
+ SourceManager &SM;
+ const LangOptions &LangOpts;
+ llvm::DenseMap<const Stmt *, unsigned> *CounterMap;
+
+public:
+ CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM,
+ const LangOptions &LangOpts)
+ : CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(nullptr) {}
+
+ CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM,
+ const LangOptions &LangOpts,
+ llvm::DenseMap<const Stmt *, unsigned> *CounterMap)
+ : CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(CounterMap) {}
+
+ /// \brief Emit the coverage mapping data which maps the regions of
+ /// code to counters that will be used to find the execution
+ /// counts for those regions.
+ void emitCounterMapping(const Decl *D, llvm::raw_ostream &OS);
+
+ /// \brief Emit the coverage mapping data for an unused function.
+ /// It creates mapping regions with the counter of zero.
+ void emitEmptyMapping(const Decl *D, llvm::raw_ostream &OS);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
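
As a rough usage sketch of the two classes declared in this header: the module-level generator collects per-function records and emits the section once per translation unit, while the per-function generator encodes one function's regions into a byte string. Everything passed in below (the CodeGenModule state behind ModuleGen, the SourceManager, counter map, name global, and hash) is assumed to be set up elsewhere by the code generator; this illustrates the call sequence and is not code from the patch:

#include "CoverageMappingGen.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <string>

using namespace clang;
using namespace clang::CodeGen;

// Hypothetical driver: encode one function's coverage mapping and register it.
void emitCoverageForFunction(CoverageMappingModuleGen &ModuleGen,
                             SourceManager &SM, const LangOptions &LangOpts,
                             llvm::DenseMap<const Stmt *, unsigned> &CounterMap,
                             const Decl *D, llvm::GlobalVariable *NameGlobal,
                             llvm::StringRef NameStr, std::uint64_t FuncHash) {
  // Encode this function's mapping regions into a byte string.
  std::string Mapping;
  llvm::raw_string_ostream OS(Mapping);
  CoverageMappingGen(ModuleGen, SM, LangOpts, &CounterMap)
      .emitCounterMapping(D, OS);
  OS.flush();

  // Register the encoded mapping with the module-level collector; the
  // collector's emit() is then called once at the end of the translation unit.
  ModuleGen.addFunctionMappingRecord(NameGlobal, NameStr, FuncHash, Mapping);
}
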
diff --git a/lib/CodeGen/EHScopeStack.h b/lib/CodeGen/EHScopeStack.h
index b9ccfb63d7d5..e69584840990 100644
--- a/lib/CodeGen/EHScopeStack.h
+++ b/lib/CodeGen/EHScopeStack.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_EHSCOPESTACK_H
-#define CLANG_CODEGEN_EHSCOPESTACK_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_EHSCOPESTACK_H
+#define LLVM_CLANG_LIB_CODEGEN_EHSCOPESTACK_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/SmallVector.h"
@@ -74,7 +74,7 @@ template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
-enum CleanupKind {
+enum CleanupKind : unsigned {
EHCleanup = 0x1,
NormalCleanup = 0x2,
NormalAndEHCleanup = EHCleanup | NormalCleanup,
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index d7e61f0fe577..f2ffabcf3e9d 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -106,8 +106,11 @@ public:
llvm::Value *Addr,
const MemberPointerType *MPT) override;
- llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF, llvm::Value *ptr,
- QualType type) override;
+ void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
+ llvm::Value *Ptr, QualType ElementType,
+ const CXXDestructorDecl *Dtor) override;
+
+ void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
void EmitFundamentalRTTIDescriptor(QualType Type);
void EmitFundamentalRTTIDescriptors();
@@ -138,15 +141,10 @@ public:
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) override;
- void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
- CXXCtorType T, CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) override;
-
void EmitCXXConstructors(const CXXConstructorDecl *D) override;
- void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
- CXXDtorType T, CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) override;
+ void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ SmallVectorImpl<CanQualType> &ArgTys) override;
bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
CXXDtorType DT) const override {
@@ -192,10 +190,11 @@ public:
llvm::Value *This,
llvm::Type *Ty) override;
- void EmitVirtualDestructorCall(CodeGenFunction &CGF,
- const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType, SourceLocation CallLoc,
- llvm::Value *This) override;
+ llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
+ const CXXDestructorDecl *Dtor,
+ CXXDtorType DtorType,
+ llvm::Value *This,
+ const CXXMemberCallExpr *CE) override;
void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
@@ -213,6 +212,12 @@ public:
llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
const ReturnAdjustment &RA) override;
+ size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
+ FunctionArgList &Args) const override {
+ assert(!Args.empty() && "expected the arglist to not be empty!");
+ return Args.size() - 1;
+ }
+
StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
StringRef GetDeletedVirtualCallName() override
{ return "__cxa_deleted_virtual"; }
@@ -234,10 +239,15 @@ public:
llvm::Constant *dtor, llvm::Constant *addr) override;
llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
- llvm::GlobalVariable *Var);
+ llvm::Value *Val);
void EmitThreadLocalInitFuncs(
- ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
- llvm::Function *InitFunc) override;
+ CodeGenModule &CGM,
+ ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
+ CXXThreadLocals,
+ ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) override;
+
+ bool usesThreadWrapperFunction() const override { return true; }
LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
QualType LValType) override;
@@ -273,6 +283,8 @@ public:
classifyRTTIUniqueness(QualType CanTy,
llvm::GlobalValue::LinkageTypes Linkage) const;
friend class ItaniumRTTIBuilder;
+
+ void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
};
class ARMCXXABI : public ItaniumCXXABI {
@@ -348,7 +360,7 @@ llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
if (MPT->isMemberDataPointer())
return CGM.PtrDiffTy;
- return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, NULL);
+ return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, nullptr);
}
/// In the Itanium and ARM ABIs, method pointers have the form:
@@ -839,21 +851,56 @@ bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
-llvm::Value *ItaniumCXXABI::adjustToCompleteObject(CodeGenFunction &CGF,
- llvm::Value *ptr,
- QualType type) {
- // Grab the vtable pointer as an intptr_t*.
- llvm::Value *vtable = CGF.GetVTablePtr(ptr, CGF.IntPtrTy->getPointerTo());
-
- // Track back to entry -2 and pull out the offset there.
- llvm::Value *offsetPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(vtable, -2, "complete-offset.ptr");
- llvm::LoadInst *offset = CGF.Builder.CreateLoad(offsetPtr);
- offset->setAlignment(CGF.PointerAlignInBytes);
+void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
+ const CXXDeleteExpr *DE,
+ llvm::Value *Ptr,
+ QualType ElementType,
+ const CXXDestructorDecl *Dtor) {
+ bool UseGlobalDelete = DE->isGlobalDelete();
+ if (UseGlobalDelete) {
+ // Derive the complete-object pointer, which is what we need
+ // to pass to the deallocation function.
+
+ // Grab the vtable pointer as an intptr_t*.
+ llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo());
+
+ // Track back to entry -2 and pull out the offset there.
+ llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
+ VTable, -2, "complete-offset.ptr");
+ llvm::LoadInst *Offset = CGF.Builder.CreateLoad(OffsetPtr);
+ Offset->setAlignment(CGF.PointerAlignInBytes);
+
+ // Apply the offset.
+ llvm::Value *CompletePtr = CGF.Builder.CreateBitCast(Ptr, CGF.Int8PtrTy);
+ CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
+
+ // If we're supposed to call the global delete, make sure we do so
+ // even if the destructor throws.
+ CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
+ ElementType);
+ }
- // Apply the offset.
- ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
- return CGF.Builder.CreateInBoundsGEP(ptr, offset);
+ // FIXME: Provide a source location here even though there's no
+ // CXXMemberCallExpr for dtor call.
+ CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
+ EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
+
+ if (UseGlobalDelete)
+ CGF.PopCleanupBlock();
+}
+
+void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
+ // void __cxa_rethrow();
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
+
+ llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
+
+ if (isNoReturn)
+ CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
+ else
+ CGF.EmitRuntimeCallOrInvoke(Fn);
}
static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
@@ -1066,23 +1113,6 @@ ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
return VBaseOffset;
}
-/// The generic ABI passes 'this', plus a VTT if it's initializing a
-/// base subobject.
-void
-ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
- CXXCtorType Type, CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) {
- ASTContext &Context = getContext();
-
- // All parameters are already in place except VTT, which goes after 'this'.
- // These are Clang types, so we don't need to worry about sret yet.
-
- // Check if we need to add a VTT parameter (which has type void **).
- if (Type == Ctor_Base && Ctor->getParent()->getNumVBases() != 0)
- ArgTys.insert(ArgTys.begin() + 1,
- Context.getPointerType(Context.VoidPtrTy));
-}
-
void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
// Just make sure we're in sync with TargetCXXABI.
assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
@@ -1092,27 +1122,25 @@ void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
// The constructor used for constructing this as a complete class;
- // constucts the virtual bases, then calls the base constructor.
+ // constructs the virtual bases, then calls the base constructor.
if (!D->getParent()->isAbstract()) {
// We don't need to emit the complete ctor if the class is abstract.
CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
}
}
-/// The generic ABI passes 'this', plus a VTT if it's destroying a
-/// base subobject.
-void ItaniumCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
- CXXDtorType Type,
- CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) {
+void
+ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ SmallVectorImpl<CanQualType> &ArgTys) {
ASTContext &Context = getContext();
- // 'this' parameter is already there, as well as 'this' return if
- // HasThisReturn(GlobalDecl(Dtor, Type)) is true
+ // All parameters are already in place except VTT, which goes after 'this'.
+ // These are Clang types, so we don't need to worry about sret yet.
// Check if we need to add a VTT parameter (which has type void **).
- if (Type == Dtor_Base && Dtor->getParent()->getNumVBases() != 0)
- ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+ if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0)
+ ArgTys.insert(ArgTys.begin() + 1,
+ Context.getPointerType(Context.VoidPtrTy));
}
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
@@ -1201,11 +1229,10 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
if (!Callee)
- Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
+ Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
- // FIXME: Provide a source location here.
- CGF.EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
- VTT, VTTTy, nullptr, nullptr);
+ CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(), This, VTT,
+ VTTTy, nullptr);
}
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -1232,6 +1259,12 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
// Set the right visibility.
CGM.setGlobalVisibility(VTable, RD);
+ // Use pointer alignment for the vtable. Otherwise we would align them based
+ // on the size of the initializer, which doesn't make sense as only single
+ // values are read.
+ unsigned PAlign = CGM.getTarget().getPointerAlign(0);
+ VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());
+
// If this is the magic class __cxxabiv1::__fundamental_type_info,
// we will emit the typeinfo for the fundamental types. This is the
// same behaviour as GCC.
@@ -1339,22 +1372,21 @@ llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
return CGF.Builder.CreateLoad(VFuncPtr);
}
-void ItaniumCXXABI::EmitVirtualDestructorCall(CodeGenFunction &CGF,
- const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType,
- SourceLocation CallLoc,
- llvm::Value *This) {
+llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
+ CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
+ llvm::Value *This, const CXXMemberCallExpr *CE) {
+ assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
- const CGFunctionInfo *FInfo
- = &CGM.getTypes().arrangeCXXDestructor(Dtor, DtorType);
+ const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
+ Dtor, getFromDtorType(DtorType));
llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
llvm::Value *Callee =
getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty);
- CGF.EmitCXXMemberCall(Dtor, CallLoc, Callee, ReturnValueSlot(), This,
- /*ImplicitParam=*/nullptr, QualType(), nullptr,
- nullptr);
+ CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(), This,
+ /*ImplicitParam=*/nullptr, QualType(), CE);
+ return nullptr;
}
void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
@@ -1473,10 +1505,20 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
CookieOffset.getQuantity());
// Write the number of elements into the appropriate slot.
- llvm::Value *NumElementsPtr
- = CGF.Builder.CreateBitCast(CookiePtr,
- CGF.ConvertType(SizeTy)->getPointerTo(AS));
- CGF.Builder.CreateStore(NumElements, NumElementsPtr);
+ llvm::Type *NumElementsTy = CGF.ConvertType(SizeTy)->getPointerTo(AS);
+ llvm::Value *NumElementsPtr =
+ CGF.Builder.CreateBitCast(CookiePtr, NumElementsTy);
+ llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
+ if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
+ expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
+ // The store to the CookiePtr does not need to be instrumented.
+ CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, NumElementsTy, false);
+ llvm::Constant *F =
+ CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
+ CGF.Builder.CreateCall(F, NumElementsPtr);
+ }
// Finally, compute a pointer to the actual data buffer by skipping
// over the cookie completely.
@@ -1499,7 +1541,18 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
unsigned AS = allocPtr->getType()->getPointerAddressSpace();
numElementsPtr =
CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
- return CGF.Builder.CreateLoad(numElementsPtr);
+ if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
+ return CGF.Builder.CreateLoad(numElementsPtr);
+ // In asan mode emit a function call instead of a regular load and let the
+ // run-time deal with it: if the shadow is properly poisoned return the
+ // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
+ // We can't simply ignore this load using nosanitize metadata because
+ // the metadata may be lost.
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
+ llvm::Constant *F =
+ CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
+ return CGF.Builder.CreateCall(F, numElementsPtr);
}
CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
@@ -1656,6 +1709,16 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// If the variable is thread-local, so is its guard variable.
guard->setThreadLocalMode(var->getThreadLocalMode());
+ // The ABI says: It is suggested that it be emitted in the same COMDAT group
+ // as the associated data object
+ llvm::Comdat *C = var->getComdat();
+ if (!D.isLocalVarDecl() && C) {
+ guard->setComdat(C);
+ CGF.CurFn->setComdat(C);
+ } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
+ guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
+ }
+
CGM.setStaticLocalDeclGuardAddress(&D, guard);
}
@@ -1851,7 +1914,7 @@ getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
- llvm::GlobalVariable *Var) {
+ llvm::Value *Val) {
// Mangle the name for the thread_local wrapper function.
SmallString<256> WrapperName;
{
@@ -1860,10 +1923,10 @@ ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
Out.flush();
}
- if (llvm::Value *V = Var->getParent()->getNamedValue(WrapperName))
+ if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
return cast<llvm::Function>(V);
- llvm::Type *RetTy = Var->getType();
+ llvm::Type *RetTy = Val->getType();
if (VD->getType()->isReferenceType())
RetTy = RetTy->getPointerElementType();
@@ -1878,11 +1941,29 @@ ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
}
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
- ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
- llvm::Function *InitFunc) {
- for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
- const VarDecl *VD = Decls[I].first;
- llvm::GlobalVariable *Var = Decls[I].second;
+ CodeGenModule &CGM,
+ ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
+ CXXThreadLocals, ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) {
+ llvm::Function *InitFunc = nullptr;
+ if (!CXXThreadLocalInits.empty()) {
+ // Generate a guarded initialization function.
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init",
+ SourceLocation(),
+ /*TLS=*/true);
+ llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
+ llvm::GlobalVariable::InternalLinkage,
+ llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
+ Guard->setThreadLocal(true);
+ CodeGenFunction(CGM)
+ .GenerateCXXGlobalInitFunc(InitFunc, CXXThreadLocalInits, Guard);
+ }
+ for (unsigned I = 0, N = CXXThreadLocals.size(); I != N; ++I) {
+ const VarDecl *VD = CXXThreadLocals[I].first;
+ llvm::GlobalVariable *Var = CXXThreadLocals[I].second;
// Some targets require that all access to thread local variables go through
// the thread wrapper. This means that we cannot attempt to create a thread
@@ -1951,7 +2032,9 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
LI->setAlignment(CGM.getContext().getDeclAlign(VD).getQuantity());
Val = LI;
}
-
+ if (Val->getType() != Wrapper->getReturnType())
+ Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Val, Wrapper->getReturnType(), "");
Builder.CreateRet(Val);
}
}
@@ -1962,8 +2045,7 @@ LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
QualType T = VD->getType();
llvm::Type *Ty = CGF.getTypes().ConvertTypeForMem(T);
llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD, Ty);
- llvm::Function *Wrapper =
- getOrCreateThreadLocalWrapper(VD, cast<llvm::GlobalVariable>(Val));
+ llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
Val = CGF.Builder.CreateCall(Wrapper);
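The wrapper machinery above lowers ordinary thread_local variables with dynamic initialization; a minimal sketch, assuming the Itanium ABI (the type and variable names are invented):

struct Counter {
  Counter();                 // dynamic initialization, so __tls_init must run first
  int value;
};

extern thread_local Counter tls_counter;

int read() {
  // Each use goes through the thread wrapper, which runs __tls_init (guarded
  // by __tls_guard) before returning the variable's address.
  return tls_counter.value;
}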
@@ -2125,6 +2207,11 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
/*Constant=*/true,
llvm::GlobalValue::ExternalLinkage, nullptr,
Name);
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (RD->hasAttr<DLLImportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
+ }
}
return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
@@ -2247,7 +2334,11 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
// FIXME: this may need to be reconsidered if the key function
// changes.
- return CGM.getVTables().isVTableExternal(RD);
+ if (CGM.getVTables().isVTableExternal(RD))
+ return true;
+
+ if (RD->hasAttr<DLLImportAttr>())
+ return true;
}
return false;
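The two RTTI changes above turn type_info objects for dllimported classes into external references instead of local definitions. An illustrative use, assuming a Windows target where dllimport is meaningful:

#include <typeinfo>

struct __attribute__((dllimport)) Remote {
  virtual ~Remote();
};

bool isRemote(const std::type_info &ti) {
  // typeid(Remote) now references the imported descriptor rather than emitting one.
  return ti == typeid(Remote);
}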
@@ -2742,7 +2833,7 @@ static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
if (Base->isVirtual()) {
// Mark the virtual base as seen.
- if (!Bases.VirtualBases.insert(BaseDecl)) {
+ if (!Bases.VirtualBases.insert(BaseDecl).second) {
// If this virtual base has been seen before, then the class is diamond
// shaped.
Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
@@ -2752,7 +2843,7 @@ static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
}
} else {
// Mark the non-virtual base as seen.
- if (!Bases.NonVirtualBases.insert(BaseDecl)) {
+ if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
// If this non-virtual base has been seen before, then the class has non-
// diamond shaped repeated inheritance.
Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
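For reference, the two hierarchy shapes these flags describe look like this (illustrative types):

struct A { virtual ~A() {} };
struct B : virtual A {};
struct C : virtual A {};
struct D : B, C {};          // A reached twice through virtual bases -> VMI_DiamondShaped

struct X {};
struct Y : X {};
struct Z : X, Y {};          // X repeated along non-virtual paths -> VMI_NonDiamondRepeat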
@@ -2988,3 +3079,128 @@ ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
return RUK_NonUniqueVisible;
}
+
+// Find out how to codegen the complete destructor and constructor
+namespace {
+enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
+}
+static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
+ const CXXMethodDecl *MD) {
+ if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
+ return StructorCodegen::Emit;
+
+ // The complete and base structors are not equivalent if there are any virtual
+ // bases, so emit separate functions.
+ if (MD->getParent()->getNumVBases())
+ return StructorCodegen::Emit;
+
+ GlobalDecl AliasDecl;
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ AliasDecl = GlobalDecl(DD, Dtor_Complete);
+ } else {
+ const auto *CD = cast<CXXConstructorDecl>(MD);
+ AliasDecl = GlobalDecl(CD, Ctor_Complete);
+ }
+ llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
+
+ if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
+ return StructorCodegen::RAUW;
+
+ // FIXME: Should we allow available_externally aliases?
+ if (!llvm::GlobalAlias::isValidLinkage(Linkage))
+ return StructorCodegen::RAUW;
+
+ if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
+ // Only ELF supports COMDATs with arbitrary names (C5/D5).
+ if (CGM.getTarget().getTriple().isOSBinFormatELF())
+ return StructorCodegen::COMDAT;
+ return StructorCodegen::Emit;
+ }
+
+ return StructorCodegen::Alias;
+}
+
+static void emitConstructorDestructorAlias(CodeGenModule &CGM,
+ GlobalDecl AliasDecl,
+ GlobalDecl TargetDecl) {
+ llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
+
+ StringRef MangledName = CGM.getMangledName(AliasDecl);
+ llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
+ if (Entry && !Entry->isDeclaration())
+ return;
+
+ auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
+ llvm::PointerType *AliasType = Aliasee->getType();
+
+ // Create the alias with no name.
+ auto *Alias = llvm::GlobalAlias::create(
+ AliasType->getElementType(), 0, Linkage, "", Aliasee, &CGM.getModule());
+
+ // Switch any previous uses to the alias.
+ if (Entry) {
+ assert(Entry->getType() == AliasType &&
+ "declaration exists with different type");
+ Alias->takeName(Entry);
+ Entry->replaceAllUsesWith(Alias);
+ Entry->eraseFromParent();
+ } else {
+ Alias->setName(MangledName);
+ }
+
+ // Finally, set up the alias with its proper name and attributes.
+ CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
+}
+
+void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
+ StructorType Type) {
+ auto *CD = dyn_cast<CXXConstructorDecl>(MD);
+ const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
+
+ StructorCodegen CGType = getCodegenToUse(CGM, MD);
+
+ if (Type == StructorType::Complete) {
+ GlobalDecl CompleteDecl;
+ GlobalDecl BaseDecl;
+ if (CD) {
+ CompleteDecl = GlobalDecl(CD, Ctor_Complete);
+ BaseDecl = GlobalDecl(CD, Ctor_Base);
+ } else {
+ CompleteDecl = GlobalDecl(DD, Dtor_Complete);
+ BaseDecl = GlobalDecl(DD, Dtor_Base);
+ }
+
+ if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
+ emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
+ return;
+ }
+
+ if (CGType == StructorCodegen::RAUW) {
+ StringRef MangledName = CGM.getMangledName(CompleteDecl);
+ auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(BaseDecl));
+ CGM.addReplacement(MangledName, Aliasee);
+ return;
+ }
+ }
+
+ // The base destructor is equivalent to the base destructor of its
+ // base class if there is exactly one non-virtual base class with a
+ // non-trivial destructor, there are no fields with a non-trivial
+ // destructor, and the body of the destructor is trivial.
+ if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
+ !CGM.TryEmitBaseDestructorAsAlias(DD))
+ return;
+
+ llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);
+
+ if (CGType == StructorCodegen::COMDAT) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ if (DD)
+ getMangleContext().mangleCXXDtorComdat(DD, Out);
+ else
+ getMangleContext().mangleCXXCtorComdat(CD, Out);
+ llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
+ Fn->setComdat(C);
+ }
+}
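To make the new structor-emission strategy concrete: for a class with no virtual bases, the complete constructor/destructor (C1/D1) is identical to the base variant (C2/D2), so under -mconstructor-aliases getCodegenToUse picks between emitting one as an alias of the other, RAUW'ing the declaration, or (on ELF) placing both in a C5/D5 comdat. A minimal example with an illustrative type:

struct Widget {
  Widget() {}
  ~Widget() {}
};

Widget w;   // the complete variants fold into the base variants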
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index a69d4dd3fe86..c80db7d18a1b 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -43,6 +43,7 @@ public:
CompleteObjectLocatorType(nullptr) {}
bool HasThisReturn(GlobalDecl GD) const override;
+ bool hasMostDerivedReturn(GlobalDecl GD) const override;
bool classifyReturnType(CGFunctionInfo &FI) const override;
@@ -50,14 +51,26 @@ public:
bool isSRetParameterAfterThis() const override { return true; }
+ size_t getSrcArgforCopyCtor(const CXXConstructorDecl *CD,
+ FunctionArgList &Args) const override {
+ assert(Args.size() >= 2 &&
+ "expected the arglist to have at least two args!");
+ // The 'most_derived' parameter goes second if the ctor is variadic and
+ // has v-bases.
+ if (CD->getParent()->getNumVBases() > 0 &&
+ CD->getType()->castAs<FunctionProtoType>()->isVariadic())
+ return 2;
+ return 1;
+ }
+
StringRef GetPureVirtualCallName() override { return "_purecall"; }
- // No known support for deleted functions in MSVC yet, so this choice is
- // arbitrary.
StringRef GetDeletedVirtualCallName() override { return "_purecall"; }
- llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
- llvm::Value *ptr,
- QualType type) override;
+ void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
+ llvm::Value *Ptr, QualType ElementType,
+ const CXXDestructorDecl *Dtor) override;
+
+ void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
llvm::GlobalVariable *getMSCompleteObjectLocator(const CXXRecordDecl *RD,
const VPtrInfo *Info);
@@ -89,10 +102,6 @@ public:
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) override;
- void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
- CXXCtorType Type, CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) override;
-
llvm::BasicBlock *
EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
const CXXRecordDecl *RD) override;
@@ -134,10 +143,8 @@ public:
// lacks a definition for the destructor, non-base destructors must always
// delegate to or alias the base destructor.
- void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
- CXXDtorType Type,
- CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) override;
+ void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ SmallVectorImpl<CanQualType> &ArgTys) override;
/// Non-base dtors should be emitted as delegating thunks in this ABI.
bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
@@ -207,10 +214,11 @@ public:
llvm::Value *This,
llvm::Type *Ty) override;
- void EmitVirtualDestructorCall(CodeGenFunction &CGF,
- const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType, SourceLocation CallLoc,
- llvm::Value *This) override;
+ llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
+ const CXXDestructorDecl *Dtor,
+ CXXDtorType DtorType,
+ llvm::Value *This,
+ const CXXMemberCallExpr *CE) override;
void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF, GlobalDecl GD,
CallArgList &CallArgs) override {
@@ -251,9 +259,22 @@ public:
llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
const ReturnAdjustment &RA) override;
+ void EmitThreadLocalInitFuncs(
+ CodeGenModule &CGM,
+ ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
+ CXXThreadLocals,
+ ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) override;
+
+ bool usesThreadWrapperFunction() const override { return false; }
+ LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
+ QualType LValType) override;
+
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *DeclPtr,
bool PerformInit) override;
+ void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *Dtor, llvm::Constant *Addr) override;
// ==== Notes on array cookies =========
//
@@ -440,6 +461,7 @@ private:
int32_t VBPtrOffset,
int32_t VBTableOffset,
llvm::Value **VBPtr = nullptr) {
+ assert(VBTableOffset % 4 == 0 && "should be byte offset into table of i32s");
llvm::Value *VBPOffset = llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset),
*VBTOffset = llvm::ConstantInt::get(CGM.IntTy, VBTableOffset);
return GetVBaseOffsetFromVBPtr(CGF, Base, VBPOffset, VBTOffset, VBPtr);
@@ -482,6 +504,22 @@ public:
bool isZeroInitializable(const MemberPointerType *MPT) override;
+ bool isMemberPointerConvertible(const MemberPointerType *MPT) const override {
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ return RD->hasAttr<MSInheritanceAttr>();
+ }
+
+ bool isTypeInfoCalculable(QualType Ty) const override {
+ if (!CGCXXABI::isTypeInfoCalculable(Ty))
+ return false;
+ if (const auto *MPT = Ty->getAs<MemberPointerType>()) {
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ if (!RD->hasAttr<MSInheritanceAttr>())
+ return false;
+ }
+ return true;
+ }
+
llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
@@ -516,6 +554,8 @@ public:
llvm::Value *&This, llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
+ void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
+
private:
typedef std::pair<const CXXRecordDecl *, CharUnits> VFTableIdTy;
typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalVariable *> VTablesMapTy;
@@ -577,8 +617,15 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
if (RD->hasNonTrivialCopyConstructor())
return RAA_Indirect;
- // Win64 passes objects larger than 8 bytes indirectly.
- if (getContext().getTypeSize(RD->getTypeForDecl()) > 64)
+ // If an object has a destructor, we'd really like to pass it indirectly
+ // because it allows us to elide copies. Unfortunately, MSVC makes that
+ // impossible for small types, which it will pass in a single register or
+ // stack slot. Most objects with dtors are large-ish, so handle that early.
+ // We can't call out all large objects as being indirect because there are
+ // multiple x64 calling conventions and the C++ ABI code shouldn't dictate
+ // how we pass large POD types.
+ if (RD->hasNonTrivialDestructor() &&
+ getContext().getTypeSize(RD->getTypeForDecl()) > 64)
return RAA_Indirect;
// We have a trivial copy constructor or no copy constructors, but we have
@@ -605,11 +652,43 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
llvm_unreachable("invalid enum");
}
-llvm::Value *MicrosoftCXXABI::adjustToCompleteObject(CodeGenFunction &CGF,
- llvm::Value *ptr,
- QualType type) {
- // FIXME: implement
- return ptr;
+void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
+ const CXXDeleteExpr *DE,
+ llvm::Value *Ptr,
+ QualType ElementType,
+ const CXXDestructorDecl *Dtor) {
+ // FIXME: Provide a source location here even though there's no
+ // CXXMemberCallExpr for the dtor call.
+ bool UseGlobalDelete = DE->isGlobalDelete();
+ CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
+ llvm::Value *MDThis =
+ EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
+ if (UseGlobalDelete)
+ CGF.EmitDeleteCall(DE->getOperatorDelete(), MDThis, ElementType);
+}
+
+static llvm::Function *getRethrowFn(CodeGenModule &CGM) {
+ // _CxxThrowException takes two pointer width arguments: a value and a context
+ // object which points to a TypeInfo object.
+ llvm::Type *ArgTypes[] = {CGM.Int8PtrTy, CGM.Int8PtrTy};
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
+ auto *Fn = cast<llvm::Function>(
+ CGM.CreateRuntimeFunction(FTy, "_CxxThrowException"));
+ // _CxxThrowException is stdcall on 32-bit x86 platforms.
+ if (CGM.getTarget().getTriple().getArch() == llvm::Triple::x86)
+ Fn->setCallingConv(llvm::CallingConv::X86_StdCall);
+ return Fn;
+}
+
+void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
+ llvm::Value *Args[] = {llvm::ConstantPointerNull::get(CGM.Int8PtrTy),
+ llvm::ConstantPointerNull::get(CGM.Int8PtrTy)};
+ auto *Fn = getRethrowFn(CGM);
+ if (isNoReturn)
+ CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, Args);
+ else
+ CGF.EmitRuntimeCallOrInvoke(Fn, Args);
}
/// \brief Gets the offset to the virtual base that contains the vfptr for
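For reference, emitRethrow above handles the bare rethrow form; a sketch of the source it lowers, assuming the Microsoft C++ ABI:

void handler() {
  try {
    throw 42;
  } catch (...) {
    // A bare 'throw;' carries no value and no throw-info, so it becomes
    // _CxxThrowException(nullptr, nullptr), stdcall on 32-bit x86.
    throw;
  }
}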
@@ -735,11 +814,9 @@ bool MicrosoftCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
return false;
}
-llvm::Value *
-MicrosoftCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
- llvm::Value *This,
- const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl) {
+llvm::Value *MicrosoftCXXABI::GetVirtualBaseClassOffset(
+ CodeGenFunction &CGF, llvm::Value *This, const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
int64_t VBPtrChars =
getContext().getASTRecordLayout(ClassDecl).getVBPtrOffset().getQuantity();
llvm::Value *VBPtrOffset = llvm::ConstantInt::get(CGM.PtrDiffTy, VBPtrChars);
@@ -748,12 +825,12 @@ MicrosoftCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
IntSize *
CGM.getMicrosoftVTableContext().getVBTableIndex(ClassDecl, BaseClassDecl);
llvm::Value *VBTableOffset =
- llvm::ConstantInt::get(CGM.IntTy, VBTableChars.getQuantity());
+ llvm::ConstantInt::get(CGM.IntTy, VBTableChars.getQuantity());
llvm::Value *VBPtrToNewBase =
- GetVBaseOffsetFromVBPtr(CGF, This, VBPtrOffset, VBTableOffset);
+ GetVBaseOffsetFromVBPtr(CGF, This, VBPtrOffset, VBTableOffset);
VBPtrToNewBase =
- CGF.Builder.CreateSExtOrBitCast(VBPtrToNewBase, CGM.PtrDiffTy);
+ CGF.Builder.CreateSExtOrBitCast(VBPtrToNewBase, CGM.PtrDiffTy);
return CGF.Builder.CreateNSWAdd(VBPtrOffset, VBPtrToNewBase);
}
@@ -761,6 +838,15 @@ bool MicrosoftCXXABI::HasThisReturn(GlobalDecl GD) const {
return isa<CXXConstructorDecl>(GD.getDecl());
}
+static bool isDeletingDtor(GlobalDecl GD) {
+ return isa<CXXDestructorDecl>(GD.getDecl()) &&
+ GD.getDtorType() == Dtor_Deleting;
+}
+
+bool MicrosoftCXXABI::hasMostDerivedReturn(GlobalDecl GD) const {
+ return isDeletingDtor(GD);
+}
+
bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
if (!RD)
@@ -782,23 +868,6 @@ bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
return false;
}
-void MicrosoftCXXABI::BuildConstructorSignature(
- const CXXConstructorDecl *Ctor, CXXCtorType Type, CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) {
-
- // All parameters are already in place except is_most_derived, which goes
- // after 'this' if it's variadic and last if it's not.
-
- const CXXRecordDecl *Class = Ctor->getParent();
- const FunctionProtoType *FPT = Ctor->getType()->castAs<FunctionProtoType>();
- if (Class->getNumVBases()) {
- if (FPT->isVariadic())
- ArgTys.insert(ArgTys.begin() + 1, CGM.getContext().IntTy);
- else
- ArgTys.push_back(CGM.getContext().IntTy);
- }
-}
-
llvm::BasicBlock *
MicrosoftCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {
@@ -901,24 +970,36 @@ void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr());
llvm::Value *VBPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(ThisInt8Ptr, Offs.getQuantity());
- VBPtr = CGF.Builder.CreateBitCast(VBPtr, GV->getType()->getPointerTo(0),
+ llvm::Value *GVPtr = CGF.Builder.CreateConstInBoundsGEP2_32(GV, 0, 0);
+ VBPtr = CGF.Builder.CreateBitCast(VBPtr, GVPtr->getType()->getPointerTo(0),
"vbptr." + VBT->ReusingBase->getName());
- CGF.Builder.CreateStore(GV, VBPtr);
+ CGF.Builder.CreateStore(GVPtr, VBPtr);
}
}
-void MicrosoftCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
- CXXDtorType Type,
- CanQualType &ResTy,
+void
+MicrosoftCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
SmallVectorImpl<CanQualType> &ArgTys) {
- // 'this' is already in place
-
// TODO: 'for base' flag
-
- if (Type == Dtor_Deleting) {
+ if (T == StructorType::Deleting) {
// The scalar deleting destructor takes an implicit int parameter.
ArgTys.push_back(CGM.getContext().IntTy);
}
+ auto *CD = dyn_cast<CXXConstructorDecl>(MD);
+ if (!CD)
+ return;
+
+ // All parameters are already in place except is_most_derived, which goes
+ // after 'this' if it's variadic and last if it's not.
+
+ const CXXRecordDecl *Class = CD->getParent();
+ const FunctionProtoType *FPT = CD->getType()->castAs<FunctionProtoType>();
+ if (Class->getNumVBases()) {
+ if (FPT->isVariadic())
+ ArgTys.insert(ArgTys.begin() + 1, CGM.getContext().IntTy);
+ else
+ ArgTys.push_back(CGM.getContext().IntTy);
+ }
}
void MicrosoftCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
@@ -1030,14 +1111,6 @@ llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
return This;
}
-static bool IsDeletingDtor(GlobalDecl GD) {
- const CXXMethodDecl* MD = cast<CXXMethodDecl>(GD.getDecl());
- if (isa<CXXDestructorDecl>(MD)) {
- return GD.getDtorType() == Dtor_Deleting;
- }
- return false;
-}
-
void MicrosoftCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
QualType &ResTy,
FunctionArgList &Params) {
@@ -1058,7 +1131,7 @@ void MicrosoftCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
else
Params.push_back(IsMostDerived);
getStructorImplicitParamDecl(CGF) = IsMostDerived;
- } else if (IsDeletingDtor(CGF.CurGD)) {
+ } else if (isDeletingDtor(CGF.CurGD)) {
ImplicitParamDecl *ShouldDelete
= ImplicitParamDecl::Create(Context, nullptr,
CGF.CurGD.getDecl()->getLocation(),
@@ -1104,6 +1177,9 @@ void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
/// HasThisReturn only specifies a contract, not the implementation
if (HasThisReturn(CGF.CurGD))
CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
+ else if (hasMostDerivedReturn(CGF.CurGD))
+ CGF.Builder.CreateStore(CGF.EmitCastToVoidPtr(getThisValue(CGF)),
+ CGF.ReturnValue);
const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
if (isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases()) {
@@ -1115,7 +1191,7 @@ void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
"is_most_derived");
}
- if (IsDeletingDtor(CGF.CurGD)) {
+ if (isDeletingDtor(CGF.CurGD)) {
assert(getStructorImplicitParamDecl(CGF) &&
"no implicit parameter for a deleting destructor?");
getStructorImplicitParamValue(CGF)
@@ -1154,7 +1230,7 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
bool Delegating, llvm::Value *This) {
- llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
+ llvm::Value *Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
if (DD->isVirtual()) {
assert(Type != CXXDtorType::Dtor_Deleting &&
@@ -1163,23 +1239,25 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
This, false);
}
- // FIXME: Provide a source location here.
- CGF.EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
- /*ImplicitParam=*/nullptr,
- /*ImplicitParamTy=*/QualType(), nullptr, nullptr);
+ CGF.EmitCXXStructorCall(DD, Callee, ReturnValueSlot(), This,
+ /*ImplicitParam=*/nullptr,
+ /*ImplicitParamTy=*/QualType(), nullptr,
+ getFromDtorType(Type));
}
void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) {
MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
- VPtrInfoVector VFPtrs = VFTContext.getVFPtrOffsets(RD);
+ const VPtrInfoVector &VFPtrs = VFTContext.getVFPtrOffsets(RD);
for (VPtrInfo *Info : VFPtrs) {
llvm::GlobalVariable *VTable = getAddrOfVTable(RD, Info->FullOffsetInMDC);
if (VTable->hasInitializer())
continue;
- llvm::Constant *RTTI = getMSCompleteObjectLocator(RD, Info);
+ llvm::Constant *RTTI = getContext().getLangOpts().RTTIData
+ ? getMSCompleteObjectLocator(RD, Info)
+ : nullptr;
const VTableLayout &VTLayout =
VFTContext.getVFTableLayout(RD, Info->FullOffsetInMDC);
@@ -1240,7 +1318,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
MicrosoftVTableContext &VTContext = CGM.getMicrosoftVTableContext();
const VPtrInfoVector &VFPtrs = VTContext.getVFPtrOffsets(RD);
- if (DeferredVFTables.insert(RD)) {
+ if (DeferredVFTables.insert(RD).second) {
// We haven't processed this record type before.
// Queue up this v-table for possible deferred emission.
CGM.addDeferredVTable(RD);
@@ -1252,7 +1330,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
for (size_t J = 0, F = VFPtrs.size(); J != F; ++J) {
SmallString<256> Name;
mangleVFTableName(getMangleContext(), RD, VFPtrs[J], Name);
- if (!ObservedMangledNames.insert(Name.str()))
+ if (!ObservedMangledNames.insert(Name.str()).second)
llvm_unreachable("Already saw this mangling before?");
}
#endif
@@ -1372,29 +1450,30 @@ llvm::Value *MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
return Builder.CreateLoad(VFuncPtr);
}
-void MicrosoftCXXABI::EmitVirtualDestructorCall(CodeGenFunction &CGF,
- const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType,
- SourceLocation CallLoc,
- llvm::Value *This) {
+llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
+ CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
+ llvm::Value *This, const CXXMemberCallExpr *CE) {
+ assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
// We have only one destructor in the vftable but can get both behaviors
// by passing an implicit int parameter.
GlobalDecl GD(Dtor, Dtor_Deleting);
- const CGFunctionInfo *FInfo =
- &CGM.getTypes().arrangeCXXDestructor(Dtor, Dtor_Deleting);
+ const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
+ Dtor, StructorType::Deleting);
llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
llvm::Value *Callee = getVirtualFunctionPointer(CGF, GD, This, Ty);
ASTContext &Context = CGF.getContext();
- llvm::Value *ImplicitParam =
- llvm::ConstantInt::get(llvm::IntegerType::getInt32Ty(CGF.getLLVMContext()),
- DtorType == Dtor_Deleting);
+ llvm::Value *ImplicitParam = llvm::ConstantInt::get(
+ llvm::IntegerType::getInt32Ty(CGF.getLLVMContext()),
+ DtorType == Dtor_Deleting);
This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
- CGF.EmitCXXMemberCall(Dtor, CallLoc, Callee, ReturnValueSlot(), This,
- ImplicitParam, Context.IntTy, nullptr, nullptr);
+ RValue RV = CGF.EmitCXXStructorCall(Dtor, Callee, ReturnValueSlot(), This,
+ ImplicitParam, Context.IntTy, CE,
+ StructorType::Deleting);
+ return RV.getScalarVal();
}
const VBTableGlobals &
@@ -1427,6 +1506,9 @@ MicrosoftCXXABI::enumerateVBTables(const CXXRecordDecl *RD) {
llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk(
const CXXMethodDecl *MD,
const MicrosoftVTableContext::MethodVFTableLocation &ML) {
+ assert(!isa<CXXConstructorDecl>(MD) && !isa<CXXDestructorDecl>(MD) &&
+ "can't form pointers to ctors or virtual dtors");
+
// Calculate the mangled name.
SmallString<256> ThunkName;
llvm::raw_svector_ostream Out(ThunkName);
@@ -1438,7 +1520,7 @@ llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk(
return cast<llvm::Function>(GV);
// Create the llvm::Function.
- const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(MD);
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSMemberPointerThunk(MD);
llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
llvm::Function *ThunkFn =
llvm::Function::Create(ThunkTy, llvm::Function::ExternalLinkage,
@@ -1452,44 +1534,33 @@ llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk(
CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn);
CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn);
+ // These thunks can be compared, so they are not unnamed.
+ ThunkFn->setUnnamedAddr(false);
+
// Start codegen.
CodeGenFunction CGF(CGM);
- CGF.StartThunk(ThunkFn, MD, FnInfo);
+ CGF.CurGD = GlobalDecl(MD);
+ CGF.CurFuncIsThunk = true;
+
+ // Build FunctionArgs, but only include the implicit 'this' parameter
+ // declaration.
+ FunctionArgList FunctionArgs;
+ buildThisParam(CGF, FunctionArgs);
+
+ // Start defining the function.
+ CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
+ FunctionArgs, MD->getLocation(), SourceLocation());
+ EmitThisParam(CGF);
// Load the vfptr and then callee from the vftable. The callee should have
// adjusted 'this' so that the vfptr is at offset zero.
- llvm::Value *This = CGF.LoadCXXThis();
- llvm::Value *VTable =
- CGF.GetVTablePtr(This, ThunkTy->getPointerTo()->getPointerTo());
+ llvm::Value *VTable = CGF.GetVTablePtr(
+ getThisValue(CGF), ThunkTy->getPointerTo()->getPointerTo());
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
llvm::Value *Callee = CGF.Builder.CreateLoad(VFuncPtr);
- unsigned CallingConv;
- CodeGen::AttributeListType AttributeList;
- CGM.ConstructAttributeList(FnInfo, MD, AttributeList, CallingConv, true);
- llvm::AttributeSet Attrs =
- llvm::AttributeSet::get(CGF.getLLVMContext(), AttributeList);
-
- // Do a musttail call with perfect argument forwarding. Any inalloca argument
- // will be forwarded in place without any copy.
- SmallVector<llvm::Value *, 8> Args;
- for (llvm::Argument &A : ThunkFn->args())
- Args.push_back(&A);
- llvm::CallInst *Call = CGF.Builder.CreateCall(Callee, Args);
- Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
- Call->setAttributes(Attrs);
- Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
-
- if (Call->getType()->isVoidTy())
- CGF.Builder.CreateRetVoid();
- else
- CGF.Builder.CreateRet(Call);
-
- // Finish the function to maintain CodeGenFunction invariants.
- // FIXME: Don't emit unreachable code.
- CGF.EmitBlock(CGF.createBasicBlock());
- CGF.FinishFunction();
+ CGF.EmitMustTailThunk(MD, getThisValue(CGF), Callee);
return ThunkFn;
}
@@ -1703,6 +1774,86 @@ llvm::Value* MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
cookieSize.getQuantity());
}
+static void emitGlobalDtorWithTLRegDtor(CodeGenFunction &CGF, const VarDecl &VD,
+ llvm::Constant *Dtor,
+ llvm::Constant *Addr) {
+ // Create a function which calls the destructor.
+ llvm::Constant *DtorStub = CGF.createAtExitStub(VD, Dtor, Addr);
+
+ // extern "C" int __tlregdtor(void (*f)(void));
+ llvm::FunctionType *TLRegDtorTy = llvm::FunctionType::get(
+ CGF.IntTy, DtorStub->getType(), /*IsVarArg=*/false);
+
+ llvm::Constant *TLRegDtor =
+ CGF.CGM.CreateRuntimeFunction(TLRegDtorTy, "__tlregdtor");
+ if (llvm::Function *TLRegDtorFn = dyn_cast<llvm::Function>(TLRegDtor))
+ TLRegDtorFn->setDoesNotThrow();
+
+ CGF.EmitNounwindRuntimeCall(TLRegDtor, DtorStub);
+}
+
+void MicrosoftCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *Dtor,
+ llvm::Constant *Addr) {
+ if (D.getTLSKind())
+ return emitGlobalDtorWithTLRegDtor(CGF, D, Dtor, Addr);
+
+ // The default behavior is to use atexit.
+ CGF.registerGlobalDtorWithAtExit(D, Dtor, Addr);
+}
+
+void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
+ CodeGenModule &CGM,
+ ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
+ CXXThreadLocals,
+ ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) {
+ // This will create a GV in the .CRT$XDU section. It will point to our
+ // initialization function. The CRT will call all of these function
+ // pointers at start-up time and, eventually, at thread-creation time.
+ auto AddToXDU = [&CGM](llvm::Function *InitFunc) {
+ llvm::GlobalVariable *InitFuncPtr = new llvm::GlobalVariable(
+ CGM.getModule(), InitFunc->getType(), /*IsConstant=*/true,
+ llvm::GlobalVariable::InternalLinkage, InitFunc,
+ Twine(InitFunc->getName(), "$initializer$"));
+ InitFuncPtr->setSection(".CRT$XDU");
+ // This variable has discardable linkage; we have to add it to @llvm.used to
+ // ensure it won't get discarded.
+ CGM.addUsedGlobal(InitFuncPtr);
+ return InitFuncPtr;
+ };
+
+ std::vector<llvm::Function *> NonComdatInits;
+ for (size_t I = 0, E = CXXThreadLocalInitVars.size(); I != E; ++I) {
+ llvm::GlobalVariable *GV = CXXThreadLocalInitVars[I];
+ llvm::Function *F = CXXThreadLocalInits[I];
+
+ // If the GV is already in a comdat group, then we have to join it.
+ if (llvm::Comdat *C = GV->getComdat())
+ AddToXDU(F)->setComdat(C);
+ else
+ NonComdatInits.push_back(F);
+ }
+
+ if (!NonComdatInits.empty()) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ llvm::Function *InitFunc = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, "__tls_init", SourceLocation(),
+ /*TLS=*/true);
+ CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, NonComdatInits);
+
+ AddToXDU(InitFunc);
+ }
+}
+
+LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ QualType LValType) {
+ CGF.CGM.ErrorUnsupported(VD, "thread wrappers");
+ return LValue();
+}
+
void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *GV,
bool PerformInit) {
@@ -1949,8 +2100,8 @@ MicrosoftCXXABI::BuildMemberPointer(const CXXRecordDecl *RD,
CodeGenTypes &Types = CGM.getTypes();
llvm::Constant *FirstField;
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
if (!MD->isVirtual()) {
- const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
llvm::Type *Ty;
// Check whether the function has a computable LLVM signature.
if (Types.isFuncTypeConvertible(FPT)) {
@@ -1966,14 +2117,14 @@ MicrosoftCXXABI::BuildMemberPointer(const CXXRecordDecl *RD,
} else {
MicrosoftVTableContext::MethodVFTableLocation ML =
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(MD);
- if (MD->isVariadic()) {
- CGM.ErrorUnsupported(MD, "pointer to variadic virtual member function");
- FirstField = llvm::Constant::getNullValue(CGM.VoidPtrTy);
- } else if (!CGM.getTypes().isFuncTypeConvertible(
- MD->getType()->castAs<FunctionType>())) {
+ if (!CGM.getTypes().isFuncTypeConvertible(
+ MD->getType()->castAs<FunctionType>())) {
CGM.ErrorUnsupported(MD, "pointer to virtual member function with "
"incomplete return or parameter type");
FirstField = llvm::Constant::getNullValue(CGM.VoidPtrTy);
+ } else if (FPT->getCallConv() == CC_X86FastCall) {
+ CGM.ErrorUnsupported(MD, "pointer to fastcall virtual member function");
+ FirstField = llvm::Constant::getNullValue(CGM.VoidPtrTy);
} else if (ML.VBase) {
CGM.ErrorUnsupported(MD, "pointer to virtual member function overriding "
"member function in virtual base class");
@@ -2128,11 +2279,17 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
llvm::Value *VBPtr =
Builder.CreateInBoundsGEP(This, VBPtrOffset, "vbptr");
if (VBPtrOut) *VBPtrOut = VBPtr;
- VBPtr = Builder.CreateBitCast(VBPtr, CGM.Int8PtrTy->getPointerTo(0));
+ VBPtr = Builder.CreateBitCast(VBPtr,
+ CGM.Int32Ty->getPointerTo(0)->getPointerTo(0));
llvm::Value *VBTable = Builder.CreateLoad(VBPtr, "vbtable");
+ // Translate from byte offset to table index. It improves analyzability.
+ llvm::Value *VBTableIndex = Builder.CreateAShr(
+ VBTableOffset, llvm::ConstantInt::get(VBTableOffset->getType(), 2),
+ "vbtindex", /*isExact=*/true);
+
// Load an i32 offset from the vb-table.
- llvm::Value *VBaseOffs = Builder.CreateInBoundsGEP(VBTable, VBTableOffset);
+ llvm::Value *VBaseOffs = Builder.CreateInBoundsGEP(VBTable, VBTableIndex);
VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
return Builder.CreateLoad(VBaseOffs, "vbase_offs");
}
@@ -2645,11 +2802,11 @@ detectAmbiguousBases(SmallVectorImpl<MSRTTIClass> &Classes) {
llvm::SmallPtrSet<const CXXRecordDecl *, 8> AmbiguousBases;
for (MSRTTIClass *Class = &Classes.front(); Class <= &Classes.back();) {
if ((Class->Flags & MSRTTIClass::IsVirtual) &&
- !VirtualBases.insert(Class->RD)) {
+ !VirtualBases.insert(Class->RD).second) {
Class = MSRTTIClass::getNextChild(Class);
continue;
}
- if (!UniqueBases.insert(Class->RD))
+ if (!UniqueBases.insert(Class->RD).second)
AmbiguousBases.insert(Class->RD);
Class++;
}
@@ -2875,3 +3032,45 @@ MicrosoftCXXABI::getMSCompleteObjectLocator(const CXXRecordDecl *RD,
const VPtrInfo *Info) {
return MSRTTIBuilder(*this, RD).getCompleteObjectLocator(Info);
}
+
+static void emitCXXConstructor(CodeGenModule &CGM,
+ const CXXConstructorDecl *ctor,
+ StructorType ctorType) {
+ // There are no constructor variants; always emit the complete constructor.
+ CGM.codegenCXXStructor(ctor, StructorType::Complete);
+}
+
+static void emitCXXDestructor(CodeGenModule &CGM, const CXXDestructorDecl *dtor,
+ StructorType dtorType) {
+ // The complete destructor is equivalent to the base destructor for
+ // classes with no virtual bases, so try to emit it as an alias.
+ if (!dtor->getParent()->getNumVBases() &&
+ (dtorType == StructorType::Complete || dtorType == StructorType::Base)) {
+ bool ProducedAlias = !CGM.TryEmitDefinitionAsAlias(
+ GlobalDecl(dtor, Dtor_Complete), GlobalDecl(dtor, Dtor_Base), true);
+ if (ProducedAlias) {
+ if (dtorType == StructorType::Complete)
+ return;
+ if (dtor->isVirtual())
+ CGM.getVTables().EmitThunks(GlobalDecl(dtor, Dtor_Complete));
+ }
+ }
+
+ // The base destructor is equivalent to the base destructor of its
+ // base class if there is exactly one non-virtual base class with a
+ // non-trivial destructor, there are no fields with a non-trivial
+ // destructor, and the body of the destructor is trivial.
+ if (dtorType == StructorType::Base && !CGM.TryEmitBaseDestructorAsAlias(dtor))
+ return;
+
+ CGM.codegenCXXStructor(dtor, dtorType);
+}
+
+void MicrosoftCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
+ StructorType Type) {
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ emitCXXConstructor(CGM, CD, Type);
+ return;
+ }
+ emitCXXDestructor(CGM, cast<CXXDestructorDecl>(MD), Type);
+}
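A sketch of the class shape that exercises several of the MS ABI changes above (illustrative names): constructors of classes with virtual bases gain an implicit is_most_derived int, placed right after 'this' when the constructor is variadic, and the single vftable destructor is the scalar deleting destructor, which takes an implicit int and returns 'this'.

struct Base {
  virtual ~Base() {}
};

struct Derived : virtual Base {
  Derived(int, ...) {}   // variadic + virtual base: is_most_derived goes second
  ~Derived() {}          // the deleting-destructor variant returns the most-derived 'this'
};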
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index c5d18d3286a7..4f1a82e0248d 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -33,17 +33,41 @@ namespace {
std::unique_ptr<const llvm::DataLayout> TD;
ASTContext *Ctx;
const CodeGenOptions CodeGenOpts; // Intentionally copied in.
+
+ unsigned HandlingTopLevelDecls;
+ struct HandlingTopLevelDeclRAII {
+ CodeGeneratorImpl &Self;
+ HandlingTopLevelDeclRAII(CodeGeneratorImpl &Self) : Self(Self) {
+ ++Self.HandlingTopLevelDecls;
+ }
+ ~HandlingTopLevelDeclRAII() {
+ if (--Self.HandlingTopLevelDecls == 0)
+ Self.EmitDeferredDecls();
+ }
+ };
+
+ CoverageSourceInfo *CoverageInfo;
+
protected:
std::unique_ptr<llvm::Module> M;
std::unique_ptr<CodeGen::CodeGenModule> Builder;
+ private:
+ SmallVector<CXXMethodDecl *, 8> DeferredInlineMethodDefinitions;
+
public:
CodeGeneratorImpl(DiagnosticsEngine &diags, const std::string& ModuleName,
- const CodeGenOptions &CGO, llvm::LLVMContext& C)
- : Diags(diags), CodeGenOpts(CGO),
+ const CodeGenOptions &CGO, llvm::LLVMContext& C,
+ CoverageSourceInfo *CoverageInfo = nullptr)
+ : Diags(diags), Ctx(nullptr), CodeGenOpts(CGO), HandlingTopLevelDecls(0),
+ CoverageInfo(CoverageInfo),
M(new llvm::Module(ModuleName, C)) {}
- virtual ~CodeGeneratorImpl() {}
+ virtual ~CodeGeneratorImpl() {
+ // There should normally not be any leftover inline method definitions.
+ assert(DeferredInlineMethodDefinitions.empty() ||
+ Diags.hasErrorOccurred());
+ }
llvm::Module* GetModule() override {
return M.get();
@@ -73,7 +97,7 @@ namespace {
M->setDataLayout(Ctx->getTargetInfo().getTargetDescription());
TD.reset(new llvm::DataLayout(Ctx->getTargetInfo().getTargetDescription()));
Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts, *M, *TD,
- Diags));
+ Diags, CoverageInfo));
for (size_t i = 0, e = CodeGenOpts.DependentLibraries.size(); i < e; ++i)
HandleDependentLibrary(CodeGenOpts.DependentLibraries[i]);
@@ -90,18 +114,28 @@ namespace {
if (Diags.hasErrorOccurred())
return true;
+ HandlingTopLevelDeclRAII HandlingDecl(*this);
+
// Make sure to emit all elements of a Decl.
for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
Builder->EmitTopLevelDecl(*I);
- // Emit any deferred inline method definitions.
- for (CXXMethodDecl *MD : DeferredInlineMethodDefinitions)
- Builder->EmitTopLevelDecl(MD);
- DeferredInlineMethodDefinitions.clear();
-
return true;
}
+ void EmitDeferredDecls() {
+ if (DeferredInlineMethodDefinitions.empty())
+ return;
+
+ // Emit any deferred inline method definitions. Note that more deferred
+ // methods may be added during this loop, since ASTConsumer callbacks
+ // can be invoked if AST inspection results in declarations being added.
+ HandlingTopLevelDeclRAII HandlingDecl(*this);
+ for (unsigned I = 0; I != DeferredInlineMethodDefinitions.size(); ++I)
+ Builder->EmitTopLevelDecl(DeferredInlineMethodDefinitions[I]);
+ DeferredInlineMethodDefinitions.clear();
+ }
+
void HandleInlineMethodDefinition(CXXMethodDecl *D) override {
if (Diags.hasErrorOccurred())
return;
@@ -117,6 +151,12 @@ namespace {
// void foo() { bar(); }
// } A;
DeferredInlineMethodDefinitions.push_back(D);
+
+ // Provide some coverage mapping even for methods that aren't emitted.
+ // Don't do this for templated classes though, as they may not be
+ // instantiable.
+ if (!D->getParent()->getDescribedClassTemplate())
+ Builder->AddDeferredUnusedCoverageMapping(D);
}
/// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
@@ -190,9 +230,6 @@ namespace {
void HandleDependentLibrary(llvm::StringRef Lib) override {
Builder->AddDependentLib(Lib);
}
-
- private:
- std::vector<CXXMethodDecl *> DeferredInlineMethodDefinitions;
};
}
@@ -202,6 +239,7 @@ CodeGenerator *clang::CreateLLVMCodeGen(DiagnosticsEngine &Diags,
const std::string& ModuleName,
const CodeGenOptions &CGO,
const TargetOptions &/*TO*/,
- llvm::LLVMContext& C) {
- return new CodeGeneratorImpl(Diags, ModuleName, CGO, C);
+ llvm::LLVMContext& C,
+ CoverageSourceInfo *CoverageInfo) {
+ return new CodeGeneratorImpl(Diags, ModuleName, CGO, C, CoverageInfo);
}
diff --git a/lib/CodeGen/SanitizerBlacklist.cpp b/lib/CodeGen/SanitizerBlacklist.cpp
deleted file mode 100644
index 9f1ddc8e7d7b..000000000000
--- a/lib/CodeGen/SanitizerBlacklist.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-//===--- SanitizerBlacklist.cpp - Blacklist for sanitizers ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// User-provided blacklist used to disable/alter instrumentation done in
-// sanitizers.
-//
-//===----------------------------------------------------------------------===//
-#include "SanitizerBlacklist.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalValue.h"
-#include "llvm/IR/Module.h"
-
-using namespace clang;
-using namespace CodeGen;
-
-static StringRef GetGlobalTypeString(const llvm::GlobalValue &G) {
- // Types of GlobalVariables are always pointer types.
- llvm::Type *GType = G.getType()->getElementType();
- // For now we support blacklisting struct types only.
- if (llvm::StructType *SGType = dyn_cast<llvm::StructType>(GType)) {
- if (!SGType->isLiteral())
- return SGType->getName();
- }
- return "<unknown type>";
-}
-
-bool SanitizerBlacklist::isIn(const llvm::Module &M,
- const StringRef Category) const {
- return SCL->inSection("src", M.getModuleIdentifier(), Category);
-}
-
-bool SanitizerBlacklist::isIn(const llvm::Function &F) const {
- return isIn(*F.getParent()) ||
- SCL->inSection("fun", F.getName(), "");
-}
-
-bool SanitizerBlacklist::isIn(const llvm::GlobalVariable &G,
- const StringRef Category) const {
- return isIn(*G.getParent(), Category) ||
- SCL->inSection("global", G.getName(), Category) ||
- SCL->inSection("type", GetGlobalTypeString(G), Category);
-}
-
-bool SanitizerBlacklist::isBlacklistedType(StringRef MangledTypeName) const {
- return SCL->inSection("type", MangledTypeName);
-}
diff --git a/lib/CodeGen/SanitizerBlacklist.h b/lib/CodeGen/SanitizerBlacklist.h
deleted file mode 100644
index 659441dfe346..000000000000
--- a/lib/CodeGen/SanitizerBlacklist.h
+++ /dev/null
@@ -1,46 +0,0 @@
-//===--- SanitizerBlacklist.h - Blacklist for sanitizers --------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// User-provided blacklist used to disable/alter instrumentation done in
-// sanitizers.
-//
-//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_SANITIZERBLACKLIST_H
-#define CLANG_CODEGEN_SANITIZERBLACKLIST_H
-
-#include "clang/Basic/LLVM.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/SpecialCaseList.h"
-#include <memory>
-
-namespace llvm {
-class GlobalVariable;
-class Function;
-class Module;
-}
-
-namespace clang {
-namespace CodeGen {
-
-class SanitizerBlacklist {
- std::unique_ptr<llvm::SpecialCaseList> SCL;
-
-public:
- SanitizerBlacklist(llvm::SpecialCaseList *SCL) : SCL(SCL) {}
- bool isIn(const llvm::Module &M,
- const StringRef Category = StringRef()) const;
- bool isIn(const llvm::Function &F) const;
- bool isIn(const llvm::GlobalVariable &G,
- const StringRef Category = StringRef()) const;
- bool isBlacklistedType(StringRef MangledTypeName) const;
-};
-} // end namespace CodeGen
-} // end namespace clang
-
-#endif
diff --git a/lib/CodeGen/SanitizerMetadata.cpp b/lib/CodeGen/SanitizerMetadata.cpp
new file mode 100644
index 000000000000..7c38b2807e95
--- /dev/null
+++ b/lib/CodeGen/SanitizerMetadata.cpp
@@ -0,0 +1,92 @@
+//===--- SanitizerMetadata.cpp - Metadata for sanitizers ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Class which emits metadata consumed by sanitizer instrumentation passes.
+//
+//===----------------------------------------------------------------------===//
+#include "SanitizerMetadata.h"
+#include "CodeGenModule.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Constants.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+SanitizerMetadata::SanitizerMetadata(CodeGenModule &CGM) : CGM(CGM) {}
+
+void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
+ SourceLocation Loc, StringRef Name,
+ QualType Ty, bool IsDynInit,
+ bool IsBlacklisted) {
+ if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address))
+ return;
+ IsDynInit &= !CGM.isInSanitizerBlacklist(GV, Loc, Ty, "init");
+ IsBlacklisted |= CGM.isInSanitizerBlacklist(GV, Loc, Ty);
+
+ llvm::Metadata *LocDescr = nullptr;
+ llvm::Metadata *GlobalName = nullptr;
+ llvm::LLVMContext &VMContext = CGM.getLLVMContext();
+ if (!IsBlacklisted) {
+ // Don't generate source location and global name if it is blacklisted -
+ // it won't be instrumented anyway.
+ LocDescr = getLocationMetadata(Loc);
+ if (!Name.empty())
+ GlobalName = llvm::MDString::get(VMContext, Name);
+ }
+
+ llvm::Metadata *GlobalMetadata[] = {
+ llvm::ConstantAsMetadata::get(GV), LocDescr, GlobalName,
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsDynInit)),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt1Ty(VMContext), IsBlacklisted))};
+
+ llvm::MDNode *ThisGlobal = llvm::MDNode::get(VMContext, GlobalMetadata);
+ llvm::NamedMDNode *AsanGlobals =
+ CGM.getModule().getOrInsertNamedMetadata("llvm.asan.globals");
+ AsanGlobals->addOperand(ThisGlobal);
+}
+
+void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
+ const VarDecl &D, bool IsDynInit) {
+ if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address))
+ return;
+ std::string QualName;
+ llvm::raw_string_ostream OS(QualName);
+ D.printQualifiedName(OS);
+ reportGlobalToASan(GV, D.getLocation(), OS.str(), D.getType(), IsDynInit);
+}
+
+void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
+ // For now, just make sure the global is not modified by the ASan
+ // instrumentation.
+ if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address))
+ reportGlobalToASan(GV, SourceLocation(), "", QualType(), false, true);
+}
+
+void SanitizerMetadata::disableSanitizerForInstruction(llvm::Instruction *I) {
+ I->setMetadata(CGM.getModule().getMDKindID("nosanitize"),
+ llvm::MDNode::get(CGM.getLLVMContext(), None));
+}
+
+llvm::MDNode *SanitizerMetadata::getLocationMetadata(SourceLocation Loc) {
+ PresumedLoc PLoc = CGM.getContext().getSourceManager().getPresumedLoc(Loc);
+ if (!PLoc.isValid())
+ return nullptr;
+ llvm::LLVMContext &VMContext = CGM.getLLVMContext();
+ llvm::Metadata *LocMetadata[] = {
+ llvm::MDString::get(VMContext, PLoc.getFilename()),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), PLoc.getLine())),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), PLoc.getColumn())),
+ };
+ return llvm::MDNode::get(VMContext, LocMetadata);
+}
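The IsDynInit flag reported above corresponds to globals with non-constant initializers, which ASan's init-order checking tracks; an illustrative example (names invented):

int seed();                          // assumed to be defined in another translation unit

// Initialized at program start-up, so it is reported to llvm.asan.globals with
// IsDynInit = true unless the "init" blacklist category matches it.
int global_with_dynamic_init = seed();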
diff --git a/lib/CodeGen/SanitizerMetadata.h b/lib/CodeGen/SanitizerMetadata.h
new file mode 100644
index 000000000000..d2f0651159a2
--- /dev/null
+++ b/lib/CodeGen/SanitizerMetadata.h
@@ -0,0 +1,53 @@
+//===--- SanitizerMetadata.h - Metadata for sanitizers ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Class which emits metadata consumed by sanitizer instrumentation passes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LIB_CODEGEN_SANITIZERMETADATA_H
+#define LLVM_CLANG_LIB_CODEGEN_SANITIZERMETADATA_H
+
+#include "clang/AST/Type.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+
+namespace llvm {
+class GlobalVariable;
+class Instruction;
+class MDNode;
+}
+
+namespace clang {
+class VarDecl;
+
+namespace CodeGen {
+
+class CodeGenModule;
+
+class SanitizerMetadata {
+ SanitizerMetadata(const SanitizerMetadata &) LLVM_DELETED_FUNCTION;
+ void operator=(const SanitizerMetadata &) LLVM_DELETED_FUNCTION;
+
+ CodeGenModule &CGM;
+public:
+ SanitizerMetadata(CodeGenModule &CGM);
+ void reportGlobalToASan(llvm::GlobalVariable *GV, const VarDecl &D,
+ bool IsDynInit = false);
+ void reportGlobalToASan(llvm::GlobalVariable *GV, SourceLocation Loc,
+ StringRef Name, QualType Ty, bool IsDynInit = false,
+ bool IsBlacklisted = false);
+ void disableSanitizerForGlobal(llvm::GlobalVariable *GV);
+ void disableSanitizerForInstruction(llvm::Instruction *I);
+private:
+ llvm::MDNode *getLocationMetadata(SourceLocation Loc);
+};
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index f75e59d6f2d8..6ad349515769 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -15,15 +15,16 @@
#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
+#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
-
#include <algorithm> // std::sort
using namespace clang;
@@ -64,6 +65,19 @@ static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
return getRecordArgABI(RT, CXXABI);
}
+/// Pass transparent unions as if they were the type of the first element. Sema
+/// should ensure that all elements of the union have the same "machine type".
+static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
+ if (const RecordType *UT = Ty->getAsUnionType()) {
+ const RecordDecl *UD = UT->getDecl();
+ if (UD->hasAttr<TransparentUnionAttr>()) {
+ assert(!UD->field_empty() && "sema created an empty transparent union");
+ return UD->field_begin()->getType();
+ }
+ }
+ return Ty;
+}
+
CGCXXABI &ABIInfo::getCXXABI() const {
return CGT.getCXXABI();
}
@@ -84,6 +98,15 @@ const TargetInfo &ABIInfo::getTarget() const {
return CGT.getTarget();
}
+bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ return false;
+}
+
+bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return false;
+}
+
void ABIArgInfo::dump() const {
raw_ostream &OS = llvm::errs();
OS << "(ABIArgInfo Kind=";
@@ -498,18 +521,39 @@ static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
return Ty;
}
+/// Returns true if this type can be passed in SSE registers with the
+/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
+static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
+ return true;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
+ // registers specially.
+ unsigned VecSize = Context.getTypeSize(VT);
+ if (VecSize == 128 || VecSize == 256 || VecSize == 512)
+ return true;
+ }
+ return false;
+}
+
+/// Returns true if this aggregate is small enough to be passed in SSE registers
+/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
+static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
+ return NumMembers <= 4;
+}
+
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//
/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
- CCState(unsigned CC) : CC(CC), FreeRegs(0) {}
+ CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
unsigned CC;
unsigned FreeRegs;
- unsigned StackOffset;
- bool UseInAlloca;
+ unsigned FreeSSERegs;
};
/// X86_32ABIInfo - The X86-32 ABI information.
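The two helpers above decide homogeneous-aggregate eligibility for vectorcall: floating-point and 128/256/512-bit vector members qualify, up to four of them. A sketch, assuming an x86 target where the vectorcall convention is available (types are illustrative):

typedef float v4f __attribute__((vector_size(16)));   // one XMM-sized vector

struct HVA2 {
  v4f lo;
  v4f hi;            // two qualifying members (<= 4), so the aggregate can ride in SSE registers
};

v4f __attribute__((vectorcall)) sum(HVA2 h) {
  return h.lo + h.hi;
}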
@@ -530,6 +574,17 @@ class X86_32ABIInfo : public ABIInfo {
return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
}
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorTypeForVectorCall(getContext(), Ty);
+ }
+
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t NumMembers) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorCallAggregateSmallEnough(NumMembers);
+ }
+
bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
/// getIndirectResult - Give a source type \arg Ty, return a suitable result
@@ -593,6 +648,14 @@ public:
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
+ void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
+ std::string &Constraints,
+ std::vector<llvm::Type *> &ResultRegTypes,
+ std::vector<llvm::Type *> &ResultTruncRegTypes,
+ std::vector<LValue> &ResultRegDests,
+ std::string &AsmString,
+ unsigned NumOutputs) const override;
+
llvm::Constant *
getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
unsigned Sig = (0xeb << 0) | // jmp rel8
@@ -606,6 +669,85 @@ public:
}
+/// Rewrite input constraint references after adding some output constraints.
+/// In the case where there is one output and one input and we add one output,
+/// we need to replace all operand references greater than or equal to 1:
+/// mov $0, $1
+/// mov eax, $1
+/// The result will be:
+/// mov $0, $2
+/// mov eax, $2
+static void rewriteInputConstraintReferences(unsigned FirstIn,
+ unsigned NumNewOuts,
+ std::string &AsmString) {
+ std::string Buf;
+ llvm::raw_string_ostream OS(Buf);
+ size_t Pos = 0;
+ while (Pos < AsmString.size()) {
+ size_t DollarStart = AsmString.find('$', Pos);
+ if (DollarStart == std::string::npos)
+ DollarStart = AsmString.size();
+ size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
+ if (DollarEnd == std::string::npos)
+ DollarEnd = AsmString.size();
+ OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
+ Pos = DollarEnd;
+ size_t NumDollars = DollarEnd - DollarStart;
+ if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
+ // We have an operand reference.
+ size_t DigitStart = Pos;
+ size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
+ if (DigitEnd == std::string::npos)
+ DigitEnd = AsmString.size();
+ StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
+ unsigned OperandIndex;
+ if (!OperandStr.getAsInteger(10, OperandIndex)) {
+ if (OperandIndex >= FirstIn)
+ OperandIndex += NumNewOuts;
+ OS << OperandIndex;
+ } else {
+ OS << OperandStr;
+ }
+ Pos = DigitEnd;
+ }
+ }
+ AsmString = std::move(OS.str());
+}
+
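
A standalone sketch of the renumbering rule implemented above, assuming only the standard library; renumber() is a hypothetical stand-in rather than the clang function, but it follows the same convention that an odd-length run of '$' introduces an operand reference while "$$" escapes a literal dollar.

    #include <cassert>
    #include <cctype>
    #include <string>

    static std::string renumber(const std::string &Asm, unsigned FirstIn,
                                unsigned NumNewOuts) {
      std::string Out;
      size_t Pos = 0;
      while (Pos < Asm.size()) {
        if (Asm[Pos] != '$') { Out += Asm[Pos++]; continue; }
        size_t Start = Pos;
        while (Pos < Asm.size() && Asm[Pos] == '$')
          ++Pos;
        Out.append(Asm, Start, Pos - Start);
        // An odd run of '$' followed by digits is an operand reference;
        // shift references at or above FirstIn past the new outputs.
        if ((Pos - Start) % 2 != 0 && Pos < Asm.size() &&
            isdigit((unsigned char)Asm[Pos])) {
          size_t DigitStart = Pos;
          while (Pos < Asm.size() && isdigit((unsigned char)Asm[Pos]))
            ++Pos;
          unsigned Idx =
              (unsigned)std::stoul(Asm.substr(DigitStart, Pos - DigitStart));
          if (Idx >= FirstIn)
            Idx += NumNewOuts;
          Out += std::to_string(Idx);
        }
      }
      return Out;
    }

    int main() {
      // One output, one input; adding one output shifts $1 to $2, while the
      // escaped "$$1" (a literal '$1' in the emitted assembly) is untouched.
      assert(renumber("mov $0, $1; lea $$1, $1", 1, 1) ==
             "mov $0, $2; lea $$1, $2");
    }
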
+/// Add output constraints for EAX:EDX because they are return registers.
+void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
+ CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
+ std::vector<llvm::Type *> &ResultRegTypes,
+ std::vector<llvm::Type *> &ResultTruncRegTypes,
+ std::vector<LValue> &ResultRegDests, std::string &AsmString,
+ unsigned NumOutputs) const {
+ uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
+
+ // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
+ // larger.
+ if (!Constraints.empty())
+ Constraints += ',';
+ if (RetWidth <= 32) {
+ Constraints += "={eax}";
+ ResultRegTypes.push_back(CGF.Int32Ty);
+ } else {
+ // Use the 'A' constraint for EAX:EDX.
+ Constraints += "=A";
+ ResultRegTypes.push_back(CGF.Int64Ty);
+ }
+
+ // Truncate EAX or EAX:EDX to an integer of the appropriate size.
+ llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
+ ResultTruncRegTypes.push_back(CoerceTy);
+
+ // Coerce the integer by bitcasting the return slot pointer.
+ ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
+ CoerceTy->getPointerTo()));
+ ResultRegDests.push_back(ReturnSlot);
+
+ rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
+}
+
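
A hedged sketch of the MSVC-compatible source pattern these return-register outputs serve, assuming MSVC-style inline assembly on 32-bit x86 (e.g. clang-cl); the function below is hypothetical. A body that ends in an __asm block with no return statement hands back whatever is left in EAX (or EDX:EAX for results wider than 32 bits), which the added "={eax}" / "=A" outputs bound to the return slot model.

    // Illustrative only; requires MS inline assembly support on i386.
    unsigned long long mul64(unsigned a, unsigned b) {
      __asm {
        mov eax, a
        mul b        // EDX:EAX = a * b
      }
      // No explicit return: the 64-bit result flows out through the "=A"
      // output constraint added by addReturnRegisterOutputs.
    }
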
/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
@@ -670,6 +812,14 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, CCState &State) con
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ if (State.CC == llvm::CallingConv::X86_VectorCall &&
+ isHomogeneousAggregate(RetTy, Base, NumElts)) {
+ // The LLVM struct type for such an aggregate should lower properly.
+ return ABIArgInfo::getDirect();
+ }
+
if (const VectorType *VT = RetTy->getAs<VectorType>()) {
// On Darwin, some vectors are returned in registers.
if (IsDarwinVectorABI) {
@@ -842,7 +992,8 @@ bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
State.FreeRegs -= SizeInRegs;
- if (State.CC == llvm::CallingConv::X86_FastCall) {
+ if (State.CC == llvm::CallingConv::X86_FastCall ||
+ State.CC == llvm::CallingConv::X86_VectorCall) {
if (Size > 32)
return false;
@@ -867,17 +1018,38 @@ bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
CCState &State) const {
// FIXME: Set alignment on indirect arguments.
- if (isAggregateTypeForABI(Ty)) {
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- // Check with the C++ ABI first.
- CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
- if (RAA == CGCXXABI::RAA_Indirect) {
- return getIndirectResult(Ty, false, State);
- } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
- // The field index doesn't matter, we'll fix it up later.
- return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
- }
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Check with the C++ ABI first.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (RT) {
+ CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
+ if (RAA == CGCXXABI::RAA_Indirect) {
+ return getIndirectResult(Ty, false, State);
+ } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
+ // The field index doesn't matter, we'll fix it up later.
+ return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
+ }
+ }
+
+ // vectorcall adds the concept of a homogeneous vector aggregate, similar
+ // to other targets.
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ if (State.CC == llvm::CallingConv::X86_VectorCall &&
+ isHomogeneousAggregate(Ty, Base, NumElts)) {
+ if (State.FreeSSERegs >= NumElts) {
+ State.FreeSSERegs -= NumElts;
+ if (Ty->isBuiltinType() || Ty->isVectorType())
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getExpand();
+ }
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ }
+
+ if (isAggregateTypeForABI(Ty)) {
+ if (RT) {
// Structs are always byval on win32, regardless of what they contain.
if (IsWin32StructABI)
return getIndirectResult(Ty, true, State);
@@ -909,7 +1081,9 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
if (getContext().getTypeSize(Ty) <= 4*32 &&
canExpandIndirectArgument(Ty, getContext()))
return ABIArgInfo::getExpandWithPadding(
- State.CC == llvm::CallingConv::X86_FastCall, PaddingType);
+ State.CC == llvm::CallingConv::X86_FastCall ||
+ State.CC == llvm::CallingConv::X86_VectorCall,
+ PaddingType);
return getIndirectResult(Ty, true, State);
}
@@ -952,7 +1126,10 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
CCState State(FI.getCallingConvention());
if (State.CC == llvm::CallingConv::X86_FastCall)
State.FreeRegs = 2;
- else if (FI.getHasRegParm())
+ else if (State.CC == llvm::CallingConv::X86_VectorCall) {
+ State.FreeRegs = 2;
+ State.FreeSSERegs = 6;
+ } else if (FI.getHasRegParm())
State.FreeRegs = FI.getRegParm();
else
State.FreeRegs = DefaultNumRegisterParameters;
@@ -968,6 +1145,10 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
}
}
+ // The chain argument effectively gives us another free register.
+ if (FI.isChainCall())
+ ++State.FreeRegs;
+
bool UsedInAlloca = false;
for (auto &I : FI.arguments()) {
I.info = classifyArgumentType(I.type, State);
@@ -1002,6 +1183,26 @@ X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
}
}
+static bool isArgInAlloca(const ABIArgInfo &Info) {
+ // Leave ignored and inreg arguments alone.
+ switch (Info.getKind()) {
+ case ABIArgInfo::InAlloca:
+ return true;
+ case ABIArgInfo::Indirect:
+ assert(Info.getIndirectByVal());
+ return true;
+ case ABIArgInfo::Ignore:
+ return false;
+ case ABIArgInfo::Direct:
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Expand:
+ if (Info.getInReg())
+ return false;
+ return true;
+ }
+ llvm_unreachable("invalid enum");
+}
+
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
assert(IsWin32StructABI && "inalloca only supported on win32");
@@ -1009,9 +1210,19 @@ void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
SmallVector<llvm::Type *, 6> FrameFields;
unsigned StackOffset = 0;
+ CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
- // Put the sret parameter into the inalloca struct if it's in memory.
+ // Put 'this' into the struct before 'sret', if necessary.
+ bool IsThisCall =
+ FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
ABIArgInfo &Ret = FI.getReturnInfo();
+ if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
+ isArgInAlloca(I->info)) {
+ addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
+ ++I;
+ }
+
+ // Put the sret parameter into the inalloca struct if it's in memory.
if (Ret.isIndirect() && !Ret.getInReg()) {
CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
@@ -1020,30 +1231,13 @@ void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
}
// Skip the 'this' parameter in ecx.
- CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
- if (FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall)
+ if (IsThisCall)
++I;
// Put arguments passed in memory into the struct.
for (; I != E; ++I) {
-
- // Leave ignored and inreg arguments alone.
- switch (I->info.getKind()) {
- case ABIArgInfo::Indirect:
- assert(I->info.getIndirectByVal());
- break;
- case ABIArgInfo::Ignore:
- continue;
- case ABIArgInfo::Direct:
- case ABIArgInfo::Extend:
- if (I->info.getInReg())
- continue;
- break;
- default:
- break;
- }
-
- addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
+ if (isArgInAlloca(I->info))
+ addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
}
FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
@@ -1107,22 +1301,12 @@ bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
return true;
switch (Triple.getOS()) {
- case llvm::Triple::AuroraUX:
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
case llvm::Triple::Bitrig:
- return true;
case llvm::Triple::Win32:
- switch (Triple.getEnvironment()) {
- case llvm::Triple::UnknownEnvironment:
- case llvm::Triple::Cygnus:
- case llvm::Triple::GNU:
- case llvm::Triple::MSVC:
- return true;
- default:
- return false;
- }
+ return true;
default:
return false;
}
@@ -1325,7 +1509,8 @@ public:
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {
- ABIArgInfo classify(QualType Ty, bool IsReturnType) const;
+ ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
+ bool IsReturnType) const;
public:
WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
@@ -1334,12 +1519,24 @@ public:
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorTypeForVectorCall(getContext(), Ty);
+ }
+
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t NumMembers) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorCallAggregateSmallEnough(NumMembers);
+ }
};
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+ bool HasAVX;
public:
X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
- : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
+ : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {}
const X86_64ABIInfo &getABIInfo() const {
return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
@@ -1399,6 +1596,9 @@ public:
return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
}
+ unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
+ return HasAVX ? 32 : 16;
+ }
};
static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
@@ -1430,9 +1630,10 @@ public:
};
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+ bool HasAVX;
public:
- WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
+ WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+ : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)), HasAVX(HasAVX) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
return 7;
@@ -1459,6 +1660,10 @@ public:
llvm::SmallString<32> &Opt) const override {
Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
}
+
+ unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
+ return HasAVX ? 32 : 16;
+ }
};
}
@@ -1586,10 +1791,25 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
}
if (Ty->isMemberPointerType()) {
- if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
- Lo = Hi = Integer;
- else
+ if (Ty->isMemberFunctionPointerType()) {
+ if (Has64BitPointers) {
+ // If Has64BitPointers, this is an {i64, i64}, so classify both
+ // Lo and Hi now.
+ Lo = Hi = Integer;
+ } else {
+ // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
+ // straddles an eightbyte boundary, Hi should be classified as well.
+ uint64_t EB_FuncPtr = (OffsetBase) / 64;
+ uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
+ if (EB_FuncPtr != EB_ThisAdj) {
+ Lo = Hi = Integer;
+ } else {
+ Current = Integer;
+ }
+ }
+ } else {
Current = Integer;
+ }
return;
}
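
A small illustration of the new 32-bit-pointer case, assuming an x32-style target where a member function pointer is a {function pointer, this-adjustment} pair of i32s; the structs are hypothetical.

    struct S {
      int pad;               // bytes 0-3
      void (S::*mfp)();      // bytes 4-11: the pair straddles the first
                             // eightbyte, so Lo and Hi both become Integer
    };

    struct T {
      void (T::*mfp)();      // bytes 0-7: fits in one eightbyte, so only
                             // Current is set to Integer
    };
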
@@ -2171,7 +2391,7 @@ GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
// the second element at offset 8. Check for this:
unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
unsigned HiAlign = TD.getABITypeAlignment(Hi);
- unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
+ unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
// To handle this, we have to increase the size of the low part so that the
@@ -2190,7 +2410,7 @@ GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
}
}
- llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
+ llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
// Verify that the second element is at an 8-byte offset.
@@ -2267,7 +2487,7 @@ classifyReturnType(QualType RetTy) const {
assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
llvm::Type::getX86_FP80Ty(getVMContext()),
- NULL);
+ nullptr);
break;
}
@@ -2333,6 +2553,8 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(
bool isNamedArg)
const
{
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
X86_64ABIInfo::Class Lo, Hi;
classify(Ty, 0, Lo, Hi, isNamedArg);
@@ -2468,23 +2690,21 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (FI.getReturnInfo().isIndirect())
--freeIntRegs;
- bool isVariadic = FI.isVariadic();
- unsigned numRequiredArgs = 0;
- if (isVariadic)
- numRequiredArgs = FI.getRequiredArgs().getNumRequiredArgs();
+ // The chain argument effectively gives us another free register.
+ if (FI.isChainCall())
+ ++freeIntRegs;
+ unsigned NumRequiredArgs = FI.getNumRequiredArgs();
// AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
// get assigned (in left-to-right order) for passing as follows...
+ unsigned ArgNo = 0;
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it) {
- bool isNamedArg = true;
- if (isVariadic)
- isNamedArg = (it - FI.arg_begin()) <
- static_cast<signed>(numRequiredArgs);
+ it != ie; ++it, ++ArgNo) {
+ bool IsNamedArg = ArgNo < NumRequiredArgs;
unsigned neededInt, neededSSE;
it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
- neededSSE, isNamedArg);
+ neededSSE, IsNamedArg);
// AMD64-ABI 3.2.3p3: If there are no registers available for any
// eightbyte of an argument, the whole argument is passed on the
@@ -2674,7 +2894,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Type *DoubleTy = CGF.DoubleTy;
llvm::Type *DblPtrTy =
llvm::PointerType::getUnqual(DoubleTy);
- llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, NULL);
+ llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
@@ -2717,7 +2937,8 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return ResAddr;
}
-ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
+ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
+ bool IsReturnType) const {
if (Ty->isVoidType())
return ABIArgInfo::getIgnore();
@@ -2725,7 +2946,9 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- uint64_t Size = getContext().getTypeSize(Ty);
+ TypeInfo Info = getContext().getTypeInfo(Ty);
+ uint64_t Width = Info.Width;
+ unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity();
const RecordType *RT = Ty->getAs<RecordType>();
if (RT) {
@@ -2738,11 +2961,26 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
// FIXME: mingw-w64-gcc emits 128-bit struct as i128
- if (Size == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
+ if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
- Size));
+ Width));
}
+ // vectorcall adds the concept of a homogeneous vector aggregate, similar to
+ // other targets.
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
+ if (FreeSSERegs >= NumElts) {
+ FreeSSERegs -= NumElts;
+ if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getExpand();
+ }
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+
+
if (Ty->isMemberPointerType()) {
// If the member pointer is represented by an LLVM int or ptr, pass it
// directly.
@@ -2754,25 +2992,35 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
if (RT || Ty->isMemberPointerType()) {
// MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
// not 1, 2, 4, or 8 bytes, must be passed by reference."
- if (Size > 64 || !llvm::isPowerOf2_64(Size))
+ if (Width > 64 || !llvm::isPowerOf2_64(Width))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
// Otherwise, coerce it to a small integer.
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
}
- if (Ty->isPromotableIntegerType())
+ // The bool type is always extended as the ABI requires; other builtin
+ // types are not extended.
+ const BuiltinType *BT = Ty->getAs<BuiltinType>();
+ if (BT && BT->getKind() == BuiltinType::Bool)
return ABIArgInfo::getExtend();
return ABIArgInfo::getDirect();
}
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ bool IsVectorCall =
+ FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
+
+ // We can use up to 4 SSE return registers with vectorcall.
+ unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classify(FI.getReturnType(), true);
+ FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
+ // We can use up to 6 SSE register parameters with vectorcall.
+ FreeSSERegs = IsVectorCall ? 6 : 0;
for (auto &I : FI.arguments())
- I.info = classify(I.type, false);
+ I.info = classify(I.type, FreeSSERegs, false);
}
llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -2812,9 +3060,14 @@ class NaClX86_64ABIInfo : public ABIInfo {
};
class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+ bool HasAVX;
public:
- NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
- : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
+ NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+ : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {
+ }
+ unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
+ return HasAVX ? 32 : 16;
+ }
};
}
@@ -2835,11 +3088,19 @@ llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// PowerPC-32
-
namespace {
-class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
+class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
public:
- PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
+ PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+ llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const override;
+};
+
+class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
// This is recovered from gcc output.
@@ -2848,10 +3109,104 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override;
+
+ unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
+ return 16; // Natural alignment for Altivec vectors.
+ }
};
}
+llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) const {
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ // TODO: Implement this. For now ignore.
+ (void)CTy;
+ return nullptr;
+ }
+
+ bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
+ bool isInt = Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
+ llvm::Type *CharPtr = CGF.Int8PtrTy;
+ llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr");
+ llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty);
+ llvm::Value *FPRPtrAsInt = Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1));
+ llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr);
+ llvm::Value *OverflowAreaPtrAsInt = Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3));
+ llvm::Value *OverflowAreaPtr = Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr);
+ llvm::Value *RegsaveAreaPtrAsInt = Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4));
+ llvm::Value *RegsaveAreaPtr = Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr);
+ llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr");
+ // Align GPR when Ty is i64.
+ if (isI64) {
+ llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1));
+ llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1));
+ llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1));
+ GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR);
+ }
+ llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr");
+ llvm::Value *OverflowArea = Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area");
+ llvm::Value *OverflowAreaAsInt = Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty);
+ llvm::Value *RegsaveArea = Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area");
+ llvm::Value *RegsaveAreaAsInt = Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty);
+
+ llvm::Value *CC = Builder.CreateICmpULT(isInt ? GPR : FPR,
+ Builder.getInt8(8), "cond");
+
+ llvm::Value *RegConstant = Builder.CreateMul(isInt ? GPR : FPR,
+ Builder.getInt8(isInt ? 4 : 8));
+
+ llvm::Value *OurReg = Builder.CreateAdd(RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty));
+
+ if (Ty->isFloatingType())
+ OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32));
+
+ llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
+ llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+
+ Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
+
+ CGF.EmitBlock(UsingRegs);
+
+ llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy);
+ // Increase the GPR/FPR indexes.
+ if (isInt) {
+ GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1));
+ Builder.CreateStore(GPR, GPRPtr);
+ } else {
+ FPR = Builder.CreateAdd(FPR, Builder.getInt8(1));
+ Builder.CreateStore(FPR, FPRPtr);
+ }
+ CGF.EmitBranch(Cont);
+
+ CGF.EmitBlock(UsingOverflow);
+
+ // Increase the overflow area.
+ llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy);
+ OverflowAreaAsInt = Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8));
+ Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr), OverflowAreaPtr);
+ CGF.EmitBranch(Cont);
+
+ CGF.EmitBlock(Cont);
+
+ llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr");
+ Result->addIncoming(Result1, UsingRegs);
+ Result->addIncoming(Result2, UsingOverflow);
+
+ if (Ty->isAggregateType()) {
+ llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr");
+ return Builder.CreateLoad(AGGPtr, false, "aggr");
+ }
+
+ return Result;
+}
+
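
For reference, a sketch of the 32-bit PowerPC SVR4 va_list layout that the byte offsets above (+1, +3, +4) walk through; the struct below is an illustrative C rendering with a made-up name, not code from this patch.

    struct ppc32_sysv_va_list {   // hypothetical name for the builtin record
      unsigned char gpr;          // +0: next general-purpose register (0..8)
      unsigned char fpr;          // +1: next floating-point register (0..8)
      unsigned short reserved;    // +2: padding
      void *overflow_arg_area;    // +4: next argument passed on the stack
      void *reg_save_area;        // +8: saved r3-r10, then f1-f8 at +32,
                                  //     which is why FP accesses add 32
    };
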
bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
@@ -2914,12 +3269,14 @@ public:
bool isPromotableTypeForABI(QualType Ty) const;
bool isAlignedParamType(QualType Ty) const;
- bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
- uint64_t &Members) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t Members) const override;
+
// TODO: We can add more logic to computeInfo to improve performance.
// Example: For aggregate arguments that fit in a register, we could
// use getDirectInReg (as is done below for structs containing a single
@@ -2964,6 +3321,10 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override;
+
+ unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
+ return 16; // Natural alignment for Altivec and VSX vectors.
+ }
};
class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
@@ -2977,6 +3338,10 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override;
+
+ unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
+ return 16; // Natural alignment for Altivec vectors.
+ }
};
}
@@ -3054,9 +3419,8 @@ PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const {
/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
/// aggregate. Base is set to the base element type, and Members is set
/// to the number of base elements.
-bool
-PPC64_SVR4_ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
- uint64_t &Members) const {
+bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ uint64_t &Members) const {
if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
uint64_t NElements = AT->getSize().getZExtValue();
if (NElements == 0)
@@ -3070,6 +3434,22 @@ PPC64_SVR4_ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
return false;
Members = 0;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &I : CXXRD->bases()) {
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), I.getType(), true))
+ continue;
+
+ uint64_t FldMembers;
+ if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
+ return false;
+
+ Members += FldMembers;
+ }
+ }
+
for (const auto *FD : RD->fields()) {
// Ignore (non-zero arrays of) empty records.
QualType FT = FD->getType();
@@ -3109,19 +3489,9 @@ PPC64_SVR4_ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
Ty = CT->getElementType();
}
- // Homogeneous aggregates for ELFv2 must have base types of float,
- // double, long double, or 128-bit vectors.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->getKind() != BuiltinType::Float &&
- BT->getKind() != BuiltinType::Double &&
- BT->getKind() != BuiltinType::LongDouble)
- return false;
- } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
- if (getContext().getTypeSize(VT) != 128)
- return false;
- } else {
+ // Most ABIs only support float, double, and some vector type widths.
+ if (!isHomogeneousAggregateBaseType(Ty))
return false;
- }
// The base type must be the same for all members. Types that
// agree in both total size and mode (float vs. vector) are
@@ -3134,18 +3504,40 @@ PPC64_SVR4_ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
return false;
}
+ return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
+}
+
+bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for ELFv2 must have base types of float,
+ // double, long double, or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::Float ||
+ BT->getKind() == BuiltinType::Double ||
+ BT->getKind() == BuiltinType::LongDouble)
+ return true;
+ }
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ if (getContext().getTypeSize(VT) == 128)
+ return true;
+ }
+ return false;
+}
+bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
+ const Type *Base, uint64_t Members) const {
// Vector types require one register, floating point types require one
// or two registers depending on their size.
- uint32_t NumRegs = Base->isVectorType() ? 1 :
- (getContext().getTypeSize(Base) + 63) / 64;
+ uint32_t NumRegs =
+ Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
// Homogeneous Aggregates may occupy at most 8 registers.
- return (Members > 0 && Members * NumRegs <= 8);
+ return Members * NumRegs <= 8;
}
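
A quick illustration of the eight-register limit enforced above under the ELFv2 rules this code targets, assuming AltiVec vector syntax (-maltivec); the struct names are made up.

    struct VF2 { vector float a, b; };    // 2 members, 1 VR each       -> accepted
    struct D8  { double d[8]; };          // 8 members, 1 FPR each      -> accepted (== 8)
    struct D9  { double d[9]; };          // needs 9 registers          -> rejected
    struct LD3 { long double x, y, z; };  // 3 members, 2 regs each = 6 -> accepted
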
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
if (Ty->isAnyComplexType())
return ABIArgInfo::getDirect();
@@ -3252,7 +3644,7 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
llvm::Type *CoerceTy;
if (Bits > GPRBits) {
CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
- CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, NULL);
+ CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
} else
CoerceTy = llvm::IntegerType::get(getVMContext(),
llvm::RoundUpToAlignment(Bits, 8));
@@ -3429,76 +3821,19 @@ private:
bool isDarwinPCS() const { return Kind == DarwinPCS; }
ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &AllocatedVFP,
- bool &IsHA, unsigned &AllocatedGPR,
- bool &IsSmallAggr, bool IsNamedArg) const;
- bool isIllegalVectorType(QualType Ty) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t Members) const override;
- virtual void computeInfo(CGFunctionInfo &FI) const {
- // To correctly handle Homogeneous Aggregate, we need to keep track of the
- // number of SIMD and Floating-point registers allocated so far.
- // If the argument is an HFA or an HVA and there are sufficient unallocated
- // SIMD and Floating-point registers, then the argument is allocated to SIMD
- // and Floating-point Registers (with one register per member of the HFA or
- // HVA). Otherwise, the NSRN is set to 8.
- unsigned AllocatedVFP = 0;
-
- // To correctly handle small aggregates, we need to keep track of the number
- // of GPRs allocated so far. If the small aggregate can't all fit into
- // registers, it will be on stack. We don't allow the aggregate to be
- // partially in registers.
- unsigned AllocatedGPR = 0;
-
- // Find the number of named arguments. Variadic arguments get special
- // treatment with the Darwin ABI.
- unsigned NumRequiredArgs = (FI.isVariadic() ?
- FI.getRequiredArgs().getNumRequiredArgs() :
- FI.arg_size());
+ bool isIllegalVectorType(QualType Ty) const;
+ void computeInfo(CGFunctionInfo &FI) const override {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it) {
- unsigned PreAllocation = AllocatedVFP, PreGPR = AllocatedGPR;
- bool IsHA = false, IsSmallAggr = false;
- const unsigned NumVFPs = 8;
- const unsigned NumGPRs = 8;
- bool IsNamedArg = ((it - FI.arg_begin()) <
- static_cast<signed>(NumRequiredArgs));
- it->info = classifyArgumentType(it->type, AllocatedVFP, IsHA,
- AllocatedGPR, IsSmallAggr, IsNamedArg);
-
- // Under AAPCS the 64-bit stack slot alignment means we can't pass HAs
- // as sequences of floats since they'll get "holes" inserted as
- // padding by the back end.
- if (IsHA && AllocatedVFP > NumVFPs && !isDarwinPCS() &&
- getContext().getTypeAlign(it->type) < 64) {
- uint32_t NumStackSlots = getContext().getTypeSize(it->type);
- NumStackSlots = llvm::RoundUpToAlignment(NumStackSlots, 64) / 64;
-
- llvm::Type *CoerceTy = llvm::ArrayType::get(
- llvm::Type::getDoubleTy(getVMContext()), NumStackSlots);
- it->info = ABIArgInfo::getDirect(CoerceTy);
- }
-
- // If we do not have enough VFP registers for the HA, any VFP registers
- // that are unallocated are marked as unavailable. To achieve this, we add
- // padding of (NumVFPs - PreAllocation) floats.
- if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
- llvm::Type *PaddingTy = llvm::ArrayType::get(
- llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
- it->info.setPaddingType(PaddingTy);
- }
- // If we do not have enough GPRs for the small aggregate, any GPR regs
- // that are unallocated are marked as unavailable.
- if (IsSmallAggr && AllocatedGPR > NumGPRs && PreGPR < NumGPRs) {
- llvm::Type *PaddingTy = llvm::ArrayType::get(
- llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreGPR);
- it->info =
- ABIArgInfo::getDirect(it->info.getCoerceToType(), 0, PaddingTy);
- }
- }
+ for (auto &it : FI.arguments())
+ it.info = classifyArgumentType(it.type);
}
llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -3508,7 +3843,7 @@ private:
CodeGenFunction &CGF) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+ CodeGenFunction &CGF) const override {
return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
: EmitAAPCSVAArg(VAListAddr, Ty, CGF);
}
@@ -3529,63 +3864,34 @@ public:
};
}
-static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
- ASTContext &Context,
- uint64_t *HAMembers = nullptr);
+ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
-ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty,
- unsigned &AllocatedVFP,
- bool &IsHA,
- unsigned &AllocatedGPR,
- bool &IsSmallAggr,
- bool IsNamedArg) const {
// Handle illegal vector types here.
if (isIllegalVectorType(Ty)) {
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 32) {
llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
- AllocatedGPR++;
return ABIArgInfo::getDirect(ResType);
}
if (Size == 64) {
llvm::Type *ResType =
llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
- AllocatedVFP++;
return ABIArgInfo::getDirect(ResType);
}
if (Size == 128) {
llvm::Type *ResType =
llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
- AllocatedVFP++;
return ABIArgInfo::getDirect(ResType);
}
- AllocatedGPR++;
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
- if (Ty->isVectorType())
- // Size of a legal vector should be either 64 or 128.
- AllocatedVFP++;
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->getKind() == BuiltinType::Half ||
- BT->getKind() == BuiltinType::Float ||
- BT->getKind() == BuiltinType::Double ||
- BT->getKind() == BuiltinType::LongDouble)
- AllocatedVFP++;
- }
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- if (!Ty->isFloatingType() && !Ty->isVectorType()) {
- unsigned Alignment = getContext().getTypeAlign(Ty);
- if (!isDarwinPCS() && Alignment > 64)
- AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);
-
- int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
- AllocatedGPR += RegsNeeded;
- }
return (Ty->isPromotableIntegerType() && isDarwinPCS()
? ABIArgInfo::getExtend()
: ABIArgInfo::getDirect());
@@ -3594,9 +3900,8 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty,
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- AllocatedGPR++;
return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA ==
- CGCXXABI::RAA_DirectInMemory);
+ CGCXXABI::RAA_DirectInMemory);
}
// Empty records are always ignored on Darwin, but actually passed in C++ mode
@@ -3605,36 +3910,23 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty,
if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
return ABIArgInfo::getIgnore();
- ++AllocatedGPR;
return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
// Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
const Type *Base = nullptr;
uint64_t Members = 0;
- if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
- IsHA = true;
- if (!IsNamedArg && isDarwinPCS()) {
- // With the Darwin ABI, variadic arguments are always passed on the stack
- // and should not be expanded. Treat variadic HFAs as arrays of doubles.
- uint64_t Size = getContext().getTypeSize(Ty);
- llvm::Type *BaseTy = llvm::Type::getDoubleTy(getVMContext());
- return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
- }
- AllocatedVFP += Members;
- return ABIArgInfo::getExpand();
+ if (isHomogeneousAggregate(Ty, Base, Members)) {
+ return ABIArgInfo::getDirect(
+ llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
}
// Aggregates <= 16 bytes are passed directly in registers or on the stack.
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 128) {
unsigned Alignment = getContext().getTypeAlign(Ty);
- if (!isDarwinPCS() && Alignment > 64)
- AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);
-
Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
- AllocatedGPR += Size / 64;
- IsSmallAggr = true;
+
// We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
// For aggregates with 16-byte alignment, we use i128.
if (Alignment < 128 && Size == 128) {
@@ -3644,7 +3936,6 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty,
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
- AllocatedGPR++;
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
@@ -3670,7 +3961,8 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getIgnore();
const Type *Base = nullptr;
- if (isHomogeneousAggregate(RetTy, Base, getContext()))
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(RetTy, Base, Members))
// Homogeneous Floating-point Aggregates (HFAs) are returned directly.
return ABIArgInfo::getDirect();
@@ -3698,9 +3990,46 @@ bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
return false;
}
-static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty,
- int AllocatedGPR, int AllocatedVFP,
- bool IsIndirect, CodeGenFunction &CGF) {
+bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for AAPCS64 must have base types of a floating
+ // point type or a short-vector type. This is the same as the 32-bit ABI,
+ // but with the difference that any floating-point type is allowed,
+ // including __fp16.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->isFloatingPoint())
+ return true;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned VecSize = getContext().getTypeSize(VT);
+ if (VecSize == 64 || VecSize == 128)
+ return true;
+ }
+ return false;
+}
+
+bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return Members <= 4;
+}
+
+llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) const {
+ ABIArgInfo AI = classifyArgumentType(Ty);
+ bool IsIndirect = AI.isIndirect();
+
+ llvm::Type *BaseTy = CGF.ConvertType(Ty);
+ if (IsIndirect)
+ BaseTy = llvm::PointerType::getUnqual(BaseTy);
+ else if (AI.getCoerceToType())
+ BaseTy = AI.getCoerceToType();
+
+ unsigned NumRegs = 1;
+ if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
+ BaseTy = ArrTy->getElementType();
+ NumRegs = ArrTy->getNumElements();
+ }
+ bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
+
// The AArch64 va_list type and handling is specified in the Procedure Call
// Standard, section B.4:
//
@@ -3720,21 +4049,19 @@ static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
int reg_top_index;
- int RegSize;
- if (AllocatedGPR) {
- assert(!AllocatedVFP && "Arguments never split between int & VFP regs");
+ int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
+ if (!IsFPR) {
// 3 is the field number of __gr_offs
reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
reg_top_index = 1; // field number for __gr_top
- RegSize = 8 * AllocatedGPR;
+ RegSize = llvm::RoundUpToAlignment(RegSize, 8);
} else {
- assert(!AllocatedGPR && "Argument must go in VFP or int regs");
// 4 is the field number of __vr_offs.
reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
reg_top_index = 2; // field number for __vr_top
- RegSize = 16 * AllocatedVFP;
+ RegSize = 16 * NumRegs;
}
//=======================================
@@ -3758,7 +4085,7 @@ static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty,
// Integer arguments may need to correct register alignment (for example a
// "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
// align __gr_offs to calculate the potential address.
- if (AllocatedGPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
+ if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
int Align = Ctx.getTypeAlign(Ty) / 8;
reg_offs = CGF.Builder.CreateAdd(
@@ -3806,8 +4133,8 @@ static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty,
}
const Type *Base = nullptr;
- uint64_t NumMembers;
- bool IsHFA = isHomogeneousAggregate(Ty, Base, Ctx, &NumMembers);
+ uint64_t NumMembers = 0;
+ bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
if (IsHFA && NumMembers > 1) {
// Homogeneous aggregates passed in registers will have their elements split
// and stored 16 bytes apart regardless of size (they're notionally in qN,
@@ -3926,18 +4253,6 @@ static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty,
return ResAddr;
}
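
For reference, the AAPCS64 va_list record that the struct GEPs above index (field numbers 1 through 4 correspond to the __gr_top/__vr_top/__gr_offs/__vr_offs accesses), rendered here as an illustrative C struct.

    struct aapcs64_va_list {    // illustrative rendering of the builtin record
      void *__stack;            // 0: next stacked argument
      void *__gr_top;           // 1: byte past the saved general registers
      void *__vr_top;           // 2: byte past the saved FP/SIMD registers
      int   __gr_offs;          // 3: negative offset from __gr_top, in bytes
      int   __vr_offs;          // 4: negative offset from __vr_top, in bytes
    };
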
-llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
-
- unsigned AllocatedGPR = 0, AllocatedVFP = 0;
- bool IsHA = false, IsSmallAggr = false;
- ABIArgInfo AI = classifyArgumentType(Ty, AllocatedVFP, IsHA, AllocatedGPR,
- IsSmallAggr, false /*IsNamedArg*/);
-
- return EmitAArch64VAArg(VAListAddr, Ty, AllocatedGPR, AllocatedVFP,
- AI.isIndirect(), CGF);
-}
-
llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
// We do not support va_arg for aggregates or illegal vector types.
@@ -3950,7 +4265,8 @@ llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType T
uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
const Type *Base = nullptr;
- bool isHA = isHomogeneousAggregate(Ty, Base, getContext());
+ uint64_t Members = 0;
+ bool isHA = isHomogeneousAggregate(Ty, Base, Members);
bool isIndirect = false;
// Arguments bigger than 16 bytes which aren't homogeneous aggregates should
@@ -4022,7 +4338,7 @@ private:
public:
ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind),
NumVFPs(16), NumGPRs(4) {
- setRuntimeCC();
+ setCCs();
resetAllocatedRegs();
}
@@ -4057,6 +4373,10 @@ private:
bool &IsCPRC) const;
bool isIllegalVectorType(QualType Ty) const;
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t Members) const override;
+
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -4064,7 +4384,7 @@ private:
llvm::CallingConv::ID getLLVMDefaultCC() const;
llvm::CallingConv::ID getABIDefaultCC() const;
- void setRuntimeCC();
+ void setCCs();
void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const;
void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const;
@@ -4183,11 +4503,11 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
llvm::Type *PaddingTy = llvm::ArrayType::get(
llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
if (I.info.canHaveCoerceToType()) {
- I.info = ABIArgInfo::getDirect(I.info.getCoerceToType() /* type */, 0 /* offset */,
- PaddingTy);
+ I.info = ABIArgInfo::getDirect(I.info.getCoerceToType() /* type */,
+ 0 /* offset */, PaddingTy, true);
} else {
I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */,
- PaddingTy);
+ PaddingTy, true);
}
}
}
@@ -4223,7 +4543,7 @@ llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
llvm_unreachable("bad ABI kind");
}
-void ARMABIInfo::setRuntimeCC() {
+void ARMABIInfo::setCCs() {
assert(getRuntimeCC() == llvm::CallingConv::C);
// Don't muddy up the IR with a ton of explicit annotations if
@@ -4231,90 +4551,9 @@ void ARMABIInfo::setRuntimeCC() {
llvm::CallingConv::ID abiCC = getABIDefaultCC();
if (abiCC != getLLVMDefaultCC())
RuntimeCC = abiCC;
-}
-
-/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
-/// aggregate. If HAMembers is non-null, the number of base elements
-/// contained in the type is returned through it; this is used for the
-/// recursive calls that check aggregate component types.
-static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
- ASTContext &Context, uint64_t *HAMembers) {
- uint64_t Members = 0;
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
- if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
- return false;
- Members *= AT->getSize().getZExtValue();
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return false;
-
- Members = 0;
- for (const auto *FD : RD->fields()) {
- uint64_t FldMembers;
- if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
- return false;
-
- Members = (RD->isUnion() ?
- std::max(Members, FldMembers) : Members + FldMembers);
- }
- } else {
- Members = 1;
- if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
- Members = 2;
- Ty = CT->getElementType();
- }
-
- // Homogeneous aggregates for AAPCS-VFP must have base types of float,
- // double, or 64-bit or 128-bit vectors.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->getKind() != BuiltinType::Float &&
- BT->getKind() != BuiltinType::Double &&
- BT->getKind() != BuiltinType::LongDouble)
- return false;
- } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
- unsigned VecSize = Context.getTypeSize(VT);
- if (VecSize != 64 && VecSize != 128)
- return false;
- } else {
- return false;
- }
-
- // The base type must be the same for all members. Vector types of the
- // same total size are treated as being equivalent here.
- const Type *TyPtr = Ty.getTypePtr();
- if (!Base)
- Base = TyPtr;
-
- if (Base != TyPtr) {
- // Homogeneous aggregates are defined as containing members with the
- // same machine type. There are two cases in which two members have
- // different TypePtrs but the same machine type:
-
- // 1) Vectors of the same length, regardless of the type and number
- // of their members.
- const bool SameLengthVectors = Base->isVectorType() && TyPtr->isVectorType()
- && (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));
-
- // 2) In the 32-bit AAPCS, `double' and `long double' have the same
- // machine type. This is not the case for the 64-bit AAPCS.
- const bool SameSizeDoubles =
- ( ( Base->isSpecificBuiltinType(BuiltinType::Double)
- && TyPtr->isSpecificBuiltinType(BuiltinType::LongDouble))
- || ( Base->isSpecificBuiltinType(BuiltinType::LongDouble)
- && TyPtr->isSpecificBuiltinType(BuiltinType::Double)))
- && (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));
-
- if (!SameLengthVectors && !SameSizeDoubles)
- return false;
- }
- }
-
- // Homogeneous Aggregates can have at most 4 members of the base type.
- if (HAMembers)
- *HAMembers = Members;
- return (Members > 0 && Members <= 4);
+ BuiltinCC = (getABIKind() == APCS ?
+ llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS);
}
/// markAllocatedVFPs - update VFPRegs according to the alignment and
@@ -4382,6 +4621,9 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
// with a Base Type of a single- or double-precision floating-point type,
// 64-bit containerized vectors or 128-bit containerized vectors with one
// to four Elements.
+ bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
+
+ Ty = useFirstFieldIfTransparentUnion(Ty);
// Handle illegal vector types here.
if (isIllegalVectorType(Ty)) {
@@ -4451,8 +4693,8 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
unsigned Size = getContext().getTypeSize(Ty);
if (!IsCPRC)
markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32);
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
+ : ABIArgInfo::getDirect());
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
@@ -4464,12 +4706,12 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
- if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
+ if (IsEffectivelyAAPCS_VFP) {
// Homogeneous Aggregates need to be expanded when we can fit the aggregate
// into VFP registers.
const Type *Base = nullptr;
uint64_t Members = 0;
- if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
+ if (isHomogeneousAggregate(Ty, Base, Members)) {
assert(Base && "Base class should be set for homogeneous aggregate");
// Base can be a floating-point or a vector.
if (Base->isVectorType()) {
@@ -4485,7 +4727,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
markAllocatedVFPs(2, Members * 2);
}
IsCPRC = true;
- return ABIArgInfo::getDirect();
+ return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
}
@@ -4523,9 +4765,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
markAllocatedGPRs(2, SizeRegs * 2);
}
- llvm::Type *STy =
- llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
- return ABIArgInfo::getDirect(STy);
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
@@ -4615,6 +4855,8 @@ static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
bool isVariadic) const {
+ bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
+
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
@@ -4629,8 +4871,8 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
+ : ABIArgInfo::getDirect();
}
// Are we following APCS?
@@ -4643,8 +4885,8 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
// FIXME: Consider using 2 x vector types if the back end handles them
// correctly.
if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
- getContext().getTypeSize(RetTy)));
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(
+ getVMContext(), getContext().getTypeSize(RetTy)));
// Integer like structures are returned in r0.
if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
@@ -4668,12 +4910,13 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
return ABIArgInfo::getIgnore();
// Check for homogeneous aggregates with AAPCS-VFP.
- if (getABIKind() == AAPCS_VFP && !isVariadic) {
+ if (IsEffectivelyAAPCS_VFP) {
const Type *Base = nullptr;
- if (isHomogeneousAggregate(RetTy, Base, getContext())) {
+ uint64_t Members;
+ if (isHomogeneousAggregate(RetTy, Base, Members)) {
assert(Base && "Base class should be set for homogeneous aggregate");
// Homogeneous Aggregates are returned directly.
- return ABIArgInfo::getDirect();
+ return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
}
@@ -4712,6 +4955,27 @@ bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
return false;
}
+bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for AAPCS-VFP must have base types of float,
+ // double, or 64-bit or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::Float ||
+ BT->getKind() == BuiltinType::Double ||
+ BT->getKind() == BuiltinType::LongDouble)
+ return true;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned VecSize = getContext().getTypeSize(VT);
+ if (VecSize == 64 || VecSize == 128)
+ return true;
+ }
+ return false;
+}
+
+bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return Members <= 4;
+}
+
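
A few illustrative cases for the two AAPCS-VFP predicates above (struct names are made up):

    struct Quad  { double d[4]; };        // HFA: one base type, 4 members
    struct Mixed { float f; double d; };  // not an HFA: base types differ
    struct Five  { float f[5]; };         // not an HFA: more than 4 members
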
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::Type *BP = CGF.Int8PtrTy;
@@ -4876,6 +5140,10 @@ ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
+ // Return aggregate types as indirect, passed by value
+ if (isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getIndirect(0, /* byval */ true);
+
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
@@ -4953,9 +5221,10 @@ void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
// Get "nvvm.annotations" metadata node
llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
- llvm::Value *MDVals[] = {
- F, llvm::MDString::get(Ctx, Name),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand)};
+ llvm::Metadata *MDVals[] = {
+ llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
// Append metadata to nvvm.annotations
MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
@@ -5424,6 +5693,8 @@ llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
uint64_t OrigOffset = Offset;
uint64_t TySize = getContext().getTypeSize(Ty);
uint64_t Align = getContext().getTypeAlign(Ty) / 8;
@@ -5524,7 +5795,7 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect();
// O32 returns integer vectors in registers and N32/N64 returns all small
- // aggregates in registers..
+ // aggregates in registers.
if (!IsO32 ||
(RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
ABIArgInfo ArgInfo =
@@ -5562,10 +5833,13 @@ llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Type *BP = CGF.Int8PtrTy;
llvm::Type *BPP = CGF.Int8PtrPtrTy;
- // Integer arguments are promoted 32-bit on O32 and 64-bit on N32/N64.
+ // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
+ // Pointers are also promoted in the same way but this only matters for N32.
unsigned SlotSizeInBits = IsO32 ? 32 : 64;
- if (Ty->isIntegerType() &&
- CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) {
+ unsigned PtrWidth = getTarget().getPointerWidth(0);
+ if ((Ty->isIntegerType() &&
+ CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) ||
+ (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits,
Ty->isSignedIntegerType());
}
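
A short illustration of why pointers are now widened too, assuming the N32 ABI (32-bit pointers, 64-bit va_arg slots); plain C, nothing clang-specific.

    #include <stdarg.h>

    /* Under N32, each variadic slot is 8 bytes even though a pointer is only
       4, so the pointer value must be read through a widened 64-bit slot -
       the same promotion the integer case above already performed. */
    void *first_pointer(int count, ...) {
      va_list ap;
      va_start(ap, count);
      void *p = va_arg(ap, void *);
      va_end(ap);
      return p;
    }
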
@@ -5577,7 +5851,6 @@ llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes);
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped;
- unsigned PtrWidth = getTarget().getPointerWidth(0);
llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
if (TypeAlign > MinABIStackAlignInBytes) {
@@ -5667,20 +5940,24 @@ void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
llvm::NamedMDNode *OpenCLMetadata =
M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
- SmallVector<llvm::Value*, 5> Operands;
- Operands.push_back(F);
+ SmallVector<llvm::Metadata *, 5> Operands;
+ Operands.push_back(llvm::ConstantAsMetadata::get(F));
- Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
- llvm::APInt(32, Attr->getXDim())));
- Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
- llvm::APInt(32, Attr->getYDim())));
- Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
- llvm::APInt(32, Attr->getZDim())));
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
// Add a boolean constant operand for "required" (true) or "hint" (false)
// for implementing the work_group_size_hint attr later. Currently
// always true as the hint is not yet implemented.
- Operands.push_back(llvm::ConstantInt::getTrue(Context));
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
}
}
@@ -5822,6 +6099,45 @@ llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return AddrTyped;
}
+//===----------------------------------------------------------------------===//
+// AMDGPU ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+};
+
+}
+
+void AMDGPUTargetCodeGenInfo::SetTargetAttributes(
+ const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD)
+ return;
+
+ if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
+ llvm::Function *F = cast<llvm::Function>(GV);
+ uint32_t NumVGPR = Attr->getNumVGPR();
+ if (NumVGPR != 0)
+ F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
+ }
+
+ if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
+ llvm::Function *F = cast<llvm::Function>(GV);
+ unsigned NumSGPR = Attr->getNumSGPR();
+ if (NumSGPR != 0)
+ F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
+ }
+}
+
//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
@@ -6405,8 +6721,8 @@ void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
SmallStringEnc Enc;
if (getTypeString(Enc, D, CGM, TSC)) {
llvm::LLVMContext &Ctx = CGM.getModule().getContext();
- llvm::SmallVector<llvm::Value *, 2> MDVals;
- MDVals.push_back(GV);
+ llvm::SmallVector<llvm::Metadata *, 2> MDVals;
+ MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
llvm::NamedMDNode *MD =
CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
@@ -6424,26 +6740,25 @@ static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
const RecordDecl *RD,
const CodeGen::CodeGenModule &CGM,
TypeStringCache &TSC) {
- for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
- I != E; ++I) {
+ for (const auto *Field : RD->fields()) {
SmallStringEnc Enc;
Enc += "m(";
- Enc += I->getName();
+ Enc += Field->getName();
Enc += "){";
- if (I->isBitField()) {
+ if (Field->isBitField()) {
Enc += "b(";
llvm::raw_svector_ostream OS(Enc);
OS.resync();
- OS << I->getBitWidthValue(CGM.getContext());
+ OS << Field->getBitWidthValue(CGM.getContext());
OS.flush();
Enc += ':';
}
- if (!appendType(Enc, I->getType(), CGM, TSC))
+ if (!appendType(Enc, Field->getType(), CGM, TSC))
return false;
- if (I->isBitField())
+ if (Field->isBitField())
Enc += ')';
Enc += '}';
- FE.push_back(FieldEncoding(!I->getName().empty(), Enc));
+ FE.push_back(FieldEncoding(!Field->getName().empty(), Enc));
}
return true;
}
@@ -6752,6 +7067,14 @@ static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
// Driver code
//===----------------------------------------------------------------------===//
+const llvm::Triple &CodeGenModule::getTriple() const {
+ return getTarget().getTriple();
+}
+
+bool CodeGenModule::supportsCOMDAT() const {
+ return !getTriple().isOSBinFormatMachO();
+}
+
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (TheTargetCodeGenInfo)
return *TheTargetCodeGenInfo;
@@ -6772,9 +7095,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_be:
- case llvm::Triple::arm64:
- case llvm::Triple::arm64_be: {
+ case llvm::Triple::aarch64_be: {
AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
if (getTarget().getABI() == "darwinpcs")
Kind = AArch64ABIInfo::DarwinPCS;
@@ -6809,16 +7130,20 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
case llvm::Triple::ppc64:
if (Triple.isOSBinFormatELF()) {
- // FIXME: Should be switchable via command-line option.
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
+ if (getTarget().getABI() == "elfv2")
+ Kind = PPC64_SVR4_ABIInfo::ELFv2;
+
return *(TheTargetCodeGenInfo =
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
} else
return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
case llvm::Triple::ppc64le: {
assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
- // FIXME: Should be switchable via command-line option.
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
+ if (getTarget().getABI() == "elfv1")
+ Kind = PPC64_SVR4_ABIInfo::ELFv1;
+
return *(TheTargetCodeGenInfo =
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
}
@@ -6840,7 +7165,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
bool IsDarwinVectorABI = Triple.isOSDarwin();
bool IsSmallStructInRegABI =
X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
- bool IsWin32FloatStructABI = Triple.isWindowsMSVCEnvironment();
+ bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
if (Triple.getOS() == llvm::Triple::Win32) {
return *(TheTargetCodeGenInfo =
@@ -6862,17 +7187,22 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
switch (Triple.getOS()) {
case llvm::Triple::Win32:
- return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
+ return *(TheTargetCodeGenInfo =
+ new WinX86_64TargetCodeGenInfo(Types, HasAVX));
case llvm::Triple::NaCl:
- return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
- HasAVX));
+ return *(TheTargetCodeGenInfo =
+ new NaClX86_64TargetCodeGenInfo(Types, HasAVX));
default:
- return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
- HasAVX));
+ return *(TheTargetCodeGenInfo =
+ new X86_64TargetCodeGenInfo(Types, HasAVX));
}
}
case llvm::Triple::hexagon:
return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
+ case llvm::Triple::r600:
+ return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
+ case llvm::Triple::amdgcn:
+ return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
case llvm::Triple::sparcv9:
return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
case llvm::Triple::xcore:
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index 2616820185f5..cc469d69e390 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -12,9 +12,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_TARGETINFO_H
-#define CLANG_CODEGEN_TARGETINFO_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
+#include "CGValue.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/SmallString.h"
@@ -129,6 +130,14 @@ public:
return Ty;
}
+ /// Adds constraints and types for result registers.
+ virtual void addReturnRegisterOutputs(
+ CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue,
+ std::string &Constraints, std::vector<llvm::Type *> &ResultRegTypes,
+ std::vector<llvm::Type *> &ResultTruncRegTypes,
+ std::vector<CodeGen::LValue> &ResultRegDests, std::string &AsmString,
+ unsigned NumOutputs) const {}
+
/// doesReturnSlotInterfereWithArgs - Return true if the target uses an
/// argument slot for an 'sret' type.
virtual bool doesReturnSlotInterfereWithArgs() const { return true; }
@@ -209,7 +218,14 @@ public:
virtual void getDetectMismatchOption(llvm::StringRef Name,
llvm::StringRef Value,
llvm::SmallString<32> &Opt) const {}
+
+ /// Gets the target-specific default alignment used when an 'aligned' clause
+ /// is used with a 'simd' OpenMP directive without specifying a specific
+ /// alignment.
+ virtual unsigned getOpenMPSimdDefaultAlignment(QualType Type) const {
+ return 0;
+ }
};
}
-#endif // CLANG_CODEGEN_TARGETINFO_H
+#endif
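The TargetInfo.h hunks above add virtual hooks with benign defaults (an empty addReturnRegisterOutputs and a getOpenMPSimdDefaultAlignment that returns 0), so only targets with an opinion need to override them. Below is a plain C++ sketch of that idiom, not part of the patch; the class and method names are invented.

#include <iostream>
#include <memory>

class TargetHooks {
public:
  virtual ~TargetHooks() = default;
  // Default: no target preference; the caller falls back to its own logic.
  virtual unsigned simdDefaultAlignment() const { return 0; }
};

class WideVectorTargetHooks : public TargetHooks {
public:
  // A target that cares overrides the hook with a concrete value.
  unsigned simdDefaultAlignment() const override { return 64; }
};

int main() {
  std::unique_ptr<TargetHooks> Generic(new TargetHooks);
  std::unique_ptr<TargetHooks> Wide(new WideVectorTargetHooks);
  std::cout << Generic->simdDefaultAlignment() << ' '  // 0: use the fallback
            << Wide->simdDefaultAlignment() << '\n';   // 64
  return 0;
}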
diff --git a/lib/Driver/Action.cpp b/lib/Driver/Action.cpp
index 86a48fd8b7f9..360dbeecabf9 100644
--- a/lib/Driver/Action.cpp
+++ b/lib/Driver/Action.cpp
@@ -29,6 +29,7 @@ const char *Action::getClassName(ActionClass AC) {
case AnalyzeJobClass: return "analyzer";
case MigrateJobClass: return "migrator";
case CompileJobClass: return "compiler";
+ case BackendJobClass: return "backend";
case AssembleJobClass: return "assembler";
case LinkJobClass: return "linker";
case LipoJobClass: return "lipo";
@@ -48,15 +49,15 @@ InputAction::InputAction(const Arg &_Input, types::ID _Type)
void BindArchAction::anchor() {}
-BindArchAction::BindArchAction(Action *Input, const char *_ArchName)
- : Action(BindArchClass, Input, Input->getType()), ArchName(_ArchName) {
-}
+BindArchAction::BindArchAction(std::unique_ptr<Action> Input,
+ const char *_ArchName)
+ : Action(BindArchClass, std::move(Input)), ArchName(_ArchName) {}
void JobAction::anchor() {}
-JobAction::JobAction(ActionClass Kind, Action *Input, types::ID Type)
- : Action(Kind, Input, Type) {
-}
+JobAction::JobAction(ActionClass Kind, std::unique_ptr<Action> Input,
+ types::ID Type)
+ : Action(Kind, std::move(Input), Type) {}
JobAction::JobAction(ActionClass Kind, const ActionList &Inputs, types::ID Type)
: Action(Kind, Inputs, Type) {
@@ -64,39 +65,45 @@ JobAction::JobAction(ActionClass Kind, const ActionList &Inputs, types::ID Type)
void PreprocessJobAction::anchor() {}
-PreprocessJobAction::PreprocessJobAction(Action *Input, types::ID OutputType)
- : JobAction(PreprocessJobClass, Input, OutputType) {
-}
+PreprocessJobAction::PreprocessJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(PreprocessJobClass, std::move(Input), OutputType) {}
void PrecompileJobAction::anchor() {}
-PrecompileJobAction::PrecompileJobAction(Action *Input, types::ID OutputType)
- : JobAction(PrecompileJobClass, Input, OutputType) {
-}
+PrecompileJobAction::PrecompileJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(PrecompileJobClass, std::move(Input), OutputType) {}
void AnalyzeJobAction::anchor() {}
-AnalyzeJobAction::AnalyzeJobAction(Action *Input, types::ID OutputType)
- : JobAction(AnalyzeJobClass, Input, OutputType) {
-}
+AnalyzeJobAction::AnalyzeJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(AnalyzeJobClass, std::move(Input), OutputType) {}
void MigrateJobAction::anchor() {}
-MigrateJobAction::MigrateJobAction(Action *Input, types::ID OutputType)
- : JobAction(MigrateJobClass, Input, OutputType) {
-}
+MigrateJobAction::MigrateJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(MigrateJobClass, std::move(Input), OutputType) {}
void CompileJobAction::anchor() {}
-CompileJobAction::CompileJobAction(Action *Input, types::ID OutputType)
- : JobAction(CompileJobClass, Input, OutputType) {
-}
+CompileJobAction::CompileJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(CompileJobClass, std::move(Input), OutputType) {}
+
+void BackendJobAction::anchor() {}
+
+BackendJobAction::BackendJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(BackendJobClass, std::move(Input), OutputType) {}
void AssembleJobAction::anchor() {}
-AssembleJobAction::AssembleJobAction(Action *Input, types::ID OutputType)
- : JobAction(AssembleJobClass, Input, OutputType) {
-}
+AssembleJobAction::AssembleJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(AssembleJobClass, std::move(Input), OutputType) {}
void LinkJobAction::anchor() {}
@@ -118,9 +125,9 @@ DsymutilJobAction::DsymutilJobAction(ActionList &Inputs, types::ID Type)
void VerifyJobAction::anchor() {}
-VerifyJobAction::VerifyJobAction(ActionClass Kind, Action *Input,
- types::ID Type)
- : JobAction(Kind, Input, Type) {
+VerifyJobAction::VerifyJobAction(ActionClass Kind,
+ std::unique_ptr<Action> Input, types::ID Type)
+ : JobAction(Kind, std::move(Input), Type) {
assert((Kind == VerifyDebugInfoJobClass || Kind == VerifyPCHJobClass) &&
"ActionClass is not a valid VerifyJobAction");
}
@@ -134,13 +141,12 @@ VerifyJobAction::VerifyJobAction(ActionClass Kind, ActionList &Inputs,
void VerifyDebugInfoJobAction::anchor() {}
-VerifyDebugInfoJobAction::VerifyDebugInfoJobAction(Action *Input,
- types::ID Type)
- : VerifyJobAction(VerifyDebugInfoJobClass, Input, Type) {
-}
+VerifyDebugInfoJobAction::VerifyDebugInfoJobAction(
+ std::unique_ptr<Action> Input, types::ID Type)
+ : VerifyJobAction(VerifyDebugInfoJobClass, std::move(Input), Type) {}
void VerifyPCHJobAction::anchor() {}
-VerifyPCHJobAction::VerifyPCHJobAction(Action *Input, types::ID Type)
- : VerifyJobAction(VerifyPCHJobClass, Input, Type) {
-}
+VerifyPCHJobAction::VerifyPCHJobAction(std::unique_ptr<Action> Input,
+ types::ID Type)
+ : VerifyJobAction(VerifyPCHJobClass, std::move(Input), Type) {}
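The Action.cpp changes above thread std::unique_ptr<Action> through every single-input constructor, making the transfer of ownership explicit at each call site. A minimal sketch of that pattern, with invented types and not part of the patch:

#include <iostream>
#include <memory>
#include <string>
#include <utility>

struct Node {
  explicit Node(std::string Name) : Name(std::move(Name)) {}
  virtual ~Node() = default;
  std::string Name;
};

struct Wrapper : Node {
  // Taking std::unique_ptr by value documents that the constructor assumes
  // ownership; callers must std::move their pointer into it.
  Wrapper(std::unique_ptr<Node> Input, std::string Name)
      : Node(std::move(Name)), Input(std::move(Input)) {}
  std::unique_ptr<Node> Input;
};

int main() {
  std::unique_ptr<Node> Leaf(new Node("input.c"));
  Wrapper Compile(std::move(Leaf), "compile"); // Leaf is empty from here on
  std::cout << Compile.Name << " <- " << Compile.Input->Name << '\n';
  return 0;
}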
diff --git a/lib/Driver/CMakeLists.txt b/lib/Driver/CMakeLists.txt
index 33db5e9a9e17..412840b6ea6e 100644
--- a/lib/Driver/CMakeLists.txt
+++ b/lib/Driver/CMakeLists.txt
@@ -6,16 +6,17 @@ set(LLVM_LINK_COMPONENTS
add_clang_library(clangDriver
Action.cpp
Compilation.cpp
+ CrossWindowsToolChain.cpp
Driver.cpp
DriverOptions.cpp
Job.cpp
Multilib.cpp
+ MSVCToolChain.cpp
Phases.cpp
SanitizerArgs.cpp
Tool.cpp
ToolChain.cpp
ToolChains.cpp
- WindowsToolChain.cpp
Tools.cpp
Types.cpp
diff --git a/lib/Driver/Compilation.cpp b/lib/Driver/Compilation.cpp
index 49b7edd96659..2bcbd5cf943c 100644
--- a/lib/Driver/Compilation.cpp
+++ b/lib/Driver/Compilation.cpp
@@ -88,7 +88,7 @@ bool Compilation::CleanupFile(const char *File, bool IssueErrors) const {
// Failure is only failure if the file exists and is "regular". We checked
// for it being regular before, and llvm::sys::fs::remove ignores ENOENT,
// so we don't need to check again.
-
+
if (IssueErrors)
getDriver().Diag(clang::diag::err_drv_unable_to_remove_file)
<< EC.message();
@@ -131,13 +131,13 @@ int Compilation::ExecuteCommand(const Command &C,
// Follow gcc implementation of CC_PRINT_OPTIONS; we could also cache the
// output stream.
if (getDriver().CCPrintOptions && getDriver().CCPrintOptionsFilename) {
- std::string Error;
- OS = new llvm::raw_fd_ostream(getDriver().CCPrintOptionsFilename, Error,
+ std::error_code EC;
+ OS = new llvm::raw_fd_ostream(getDriver().CCPrintOptionsFilename, EC,
llvm::sys::fs::F_Append |
llvm::sys::fs::F_Text);
- if (!Error.empty()) {
+ if (EC) {
getDriver().Diag(clang::diag::err_drv_cc_print_options_failure)
- << Error;
+ << EC.message();
FailingCommand = &C;
delete OS;
return 1;
@@ -202,9 +202,8 @@ void Compilation::ExecuteJob(const Job &J,
FailingCommands.push_back(std::make_pair(Res, FailingCommand));
} else {
const JobList *Jobs = cast<JobList>(&J);
- for (JobList::const_iterator it = Jobs->begin(), ie = Jobs->end();
- it != ie; ++it)
- ExecuteJob(**it, FailingCommands);
+ for (const auto &Job : *Jobs)
+ ExecuteJob(Job, FailingCommands);
}
}
@@ -233,8 +232,8 @@ void Compilation::initCompilationForDiagnostics() {
// Redirect stdout/stderr to /dev/null.
Redirects = new const StringRef*[3]();
Redirects[0] = nullptr;
- Redirects[1] = new const StringRef();
- Redirects[2] = new const StringRef();
+ Redirects[1] = new StringRef();
+ Redirects[2] = new StringRef();
}
StringRef Compilation::getSysRoot() const {
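The Compilation.cpp hunk above moves raw_fd_ostream construction from the old std::string error-out parameter to std::error_code, so failure is now tested with "if (EC)". A small standalone sketch of the same constructor, not part of the patch; the file name is arbitrary and the flag spellings follow the 3.6-era API used in the hunk (newer LLVM renames them OF_Append/OF_Text).

#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <system_error>

int main() {
  std::error_code EC;
  llvm::raw_fd_ostream OS("cc-print-options.txt", EC,
                          llvm::sys::fs::F_Append | llvm::sys::fs::F_Text);
  if (EC) { // replaces the old 'if (!Error.empty())' check
    llvm::errs() << "could not open file: " << EC.message() << '\n';
    return 1;
  }
  OS << "-v -O2\n";
  return 0;
}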
diff --git a/lib/Driver/CrossWindowsToolChain.cpp b/lib/Driver/CrossWindowsToolChain.cpp
new file mode 100644
index 000000000000..03fe41b74fe0
--- /dev/null
+++ b/lib/Driver/CrossWindowsToolChain.cpp
@@ -0,0 +1,117 @@
+//===--- CrossWindowsToolChain.cpp - Cross Windows Tool Chain -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ToolChains.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+
+CrossWindowsToolChain::CrossWindowsToolChain(const Driver &D,
+ const llvm::Triple &T,
+ const llvm::opt::ArgList &Args)
+ : Generic_GCC(D, T, Args) {
+ if (GetCXXStdlibType(Args) == ToolChain::CST_Libstdcxx) {
+ const std::string &SysRoot = D.SysRoot;
+
+ // libstdc++ resides in /usr/lib, but depends on libgcc which is placed in
+ // /usr/lib/gcc.
+ getFilePaths().push_back(SysRoot + "/usr/lib");
+ getFilePaths().push_back(SysRoot + "/usr/lib/gcc");
+ }
+}
+
+bool CrossWindowsToolChain::IsUnwindTablesDefault() const {
+  // FIXME: all non-x86 targets need unwind tables; however, LLVM currently
+  // does not know how to emit them.
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool CrossWindowsToolChain::isPICDefault() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool CrossWindowsToolChain::isPIEDefault() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool CrossWindowsToolChain::isPICDefaultForced() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+void CrossWindowsToolChain::
+AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ const std::string &SysRoot = D.SysRoot;
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/local/include");
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> ResourceDir(D.ResourceDir);
+ llvm::sys::path::append(ResourceDir, "include");
+ addSystemInclude(DriverArgs, CC1Args, ResourceDir.str());
+ }
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
+}
+
+void CrossWindowsToolChain::
+AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const llvm::Triple &Triple = getTriple();
+ const std::string &SysRoot = getDriver().SysRoot;
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include/c++/v1");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include/c++");
+ addSystemInclude(DriverArgs, CC1Args,
+ SysRoot + "/usr/include/c++/" + Triple.str());
+ addSystemInclude(DriverArgs, CC1Args,
+ SysRoot + "/usr/include/c++/backwards");
+ }
+}
+
+void CrossWindowsToolChain::
+AddCXXStdlibLibArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ CC1Args.push_back("-lc++");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ CC1Args.push_back("-lstdc++");
+ CC1Args.push_back("-lmingw32");
+ CC1Args.push_back("-lmingwex");
+ CC1Args.push_back("-lgcc");
+ CC1Args.push_back("-lmoldname");
+ CC1Args.push_back("-lmingw32");
+ break;
+ }
+}
+
+Tool *CrossWindowsToolChain::buildLinker() const {
+ return new tools::CrossWindows::Link(*this);
+}
+
+Tool *CrossWindowsToolChain::buildAssembler() const {
+ return new tools::CrossWindows::Assemble(*this);
+}
+
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index ef26bfacdd64..1664d0d49d46 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -50,7 +50,6 @@ Driver::Driver(StringRef ClangExecutable,
: Opts(createDriverOptTable()), Diags(Diags), Mode(GCCMode),
ClangExecutable(ClangExecutable), SysRoot(DEFAULT_SYSROOT),
UseStdLib(true), DefaultTargetTriple(DefaultTargetTriple),
- DefaultImageName("a.out"),
DriverTitle("clang LLVM compiler"),
CCPrintOptionsFilename(nullptr), CCPrintHeadersFilename(nullptr),
CCLogDiagnosticsFilename(nullptr),
@@ -65,10 +64,13 @@ Driver::Driver(StringRef ClangExecutable,
// Compute the path to the resource directory.
StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
SmallString<128> P(Dir);
- if (ClangResourceDir != "")
+ if (ClangResourceDir != "") {
llvm::sys::path::append(P, ClangResourceDir);
- else
- llvm::sys::path::append(P, "..", "lib", "clang", CLANG_VERSION_STRING);
+ } else {
+ StringRef ClangLibdirSuffix(CLANG_LIBDIR_SUFFIX);
+ llvm::sys::path::append(P, "..", Twine("lib") + ClangLibdirSuffix, "clang",
+ CLANG_VERSION_STRING);
+ }
ResourceDir = P.str();
}
@@ -83,6 +85,9 @@ void Driver::ParseDriverMode(ArrayRef<const char *> Args) {
getOpts().getOption(options::OPT_driver_mode).getPrefixedName();
for (size_t I = 0, E = Args.size(); I != E; ++I) {
+    // Ignore nullptrs; they are response files' EOL markers.
+ if (Args[I] == nullptr)
+ continue;
const StringRef Arg = Args[I];
if (!Arg.startswith(OptName))
continue;
@@ -102,7 +107,7 @@ void Driver::ParseDriverMode(ArrayRef<const char *> Args) {
}
}
-InputArgList *Driver::ParseArgStrings(ArrayRef<const char *> ArgList) {
+InputArgList *Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings) {
llvm::PrettyStackTraceString CrashInfo("Command line argument parsing");
unsigned IncludedFlagsBitmask;
@@ -111,7 +116,7 @@ InputArgList *Driver::ParseArgStrings(ArrayRef<const char *> ArgList) {
getIncludeExcludeOptionFlagMasks();
unsigned MissingArgIndex, MissingArgCount;
- InputArgList *Args = getOpts().ParseArgs(ArgList.begin(), ArgList.end(),
+ InputArgList *Args = getOpts().ParseArgs(ArgStrings.begin(), ArgStrings.end(),
MissingArgIndex, MissingArgCount,
IncludedFlagsBitmask,
ExcludedFlagsBitmask);
@@ -122,9 +127,7 @@ InputArgList *Driver::ParseArgStrings(ArrayRef<const char *> ArgList) {
<< Args->getArgString(MissingArgIndex) << MissingArgCount;
// Check for unsupported options.
- for (ArgList::const_iterator it = Args->begin(), ie = Args->end();
- it != ie; ++it) {
- Arg *A = *it;
+ for (const Arg *A : *Args) {
if (A->getOption().hasFlag(options::Unsupported)) {
Diag(clang::diag::err_drv_unsupported_opt) << A->getAsString(*Args);
continue;
@@ -162,7 +165,7 @@ const {
(PhaseArg = DAL.getLastArg(options::OPT__SLASH_P))) {
FinalPhase = phases::Preprocess;
- // -{fsyntax-only,-analyze,emit-ast,S} only run up to the compiler.
+ // -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
} else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
(PhaseArg = DAL.getLastArg(options::OPT_module_file_info)) ||
(PhaseArg = DAL.getLastArg(options::OPT_verify_pch)) ||
@@ -171,10 +174,13 @@ const {
(PhaseArg = DAL.getLastArg(options::OPT__migrate)) ||
(PhaseArg = DAL.getLastArg(options::OPT__analyze,
options::OPT__analyze_auto)) ||
- (PhaseArg = DAL.getLastArg(options::OPT_emit_ast)) ||
- (PhaseArg = DAL.getLastArg(options::OPT_S))) {
+ (PhaseArg = DAL.getLastArg(options::OPT_emit_ast))) {
FinalPhase = phases::Compile;
+ // -S only runs up to the backend.
+ } else if ((PhaseArg = DAL.getLastArg(options::OPT_S))) {
+ FinalPhase = phases::Backend;
+
// -c only runs up to the assembler.
} else if ((PhaseArg = DAL.getLastArg(options::OPT_c))) {
FinalPhase = phases::Assemble;
@@ -202,10 +208,7 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
DerivedArgList *DAL = new DerivedArgList(Args);
bool HasNostdlib = Args.hasArg(options::OPT_nostdlib);
- for (ArgList::const_iterator it = Args.begin(),
- ie = Args.end(); it != ie; ++it) {
- const Arg *A = *it;
-
+ for (Arg *A : Args) {
// Unfortunately, we have to parse some forwarding options (-Xassembler,
// -Xlinker, -Xpreprocessor) because we either integrate their functionality
// (assembler and preprocessor), or bypass a previous driver ('collect2').
@@ -271,7 +274,7 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
continue;
}
- DAL->append(*it);
+ DAL->append(A);
}
// Add a default value of -mlinker-version=, if one was given and the user
@@ -400,13 +403,13 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// preprocessed source file(s). Request that the developer attach the
// diagnostic information to a bug report.
void Driver::generateCompilationDiagnostics(Compilation &C,
- const Command *FailingCommand) {
+ const Command &FailingCommand) {
if (C.getArgs().hasArg(options::OPT_fno_crash_diagnostics))
return;
// Don't try to generate diagnostics for link or dsymutil jobs.
- if (FailingCommand && (FailingCommand->getCreator().isLinkJob() ||
- FailingCommand->getCreator().isDsymutilJob()))
+ if (FailingCommand.getCreator().isLinkJob() ||
+ FailingCommand.getCreator().isDsymutilJob())
return;
// Print the version of the compiler.
@@ -421,15 +424,7 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
CCGenDiagnostics = true;
// Save the original job command(s).
- std::string Cmd;
- llvm::raw_string_ostream OS(Cmd);
- if (FailingCommand)
- FailingCommand->Print(OS, "\n", /*Quote*/ false, /*CrashReport*/ true);
- else
- // Crash triggered by FORCE_CLANG_DIAGNOSTICS_CRASH, which doesn't have an
- // associated FailingCommand, so just pass all jobs.
- C.getJobs().Print(OS, "\n", /*Quote*/ false, /*CrashReport*/ true);
- OS.flush();
+ Command Cmd = FailingCommand;
// Keep track of whether we produce any errors while trying to produce
// preprocessed sources.
@@ -473,9 +468,7 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
// Don't attempt to generate preprocessed files if multiple -arch options are
// used, unless they're all duplicates.
llvm::StringSet<> ArchNames;
- for (ArgList::const_iterator it = C.getArgs().begin(), ie = C.getArgs().end();
- it != ie; ++it) {
- Arg *A = *it;
+ for (const Arg *A : C.getArgs()) {
if (A->getOption().matches(options::OPT_arch)) {
StringRef ArchName = A->getValue();
ArchNames.insert(ArchName);
@@ -509,70 +502,86 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
SmallVector<std::pair<int, const Command *>, 4> FailingCommands;
C.ExecuteJob(C.getJobs(), FailingCommands);
- // If the command succeeded, we are done.
- if (FailingCommands.empty()) {
+ // If any of the preprocessing commands failed, clean up and exit.
+ if (!FailingCommands.empty()) {
+ if (!C.getArgs().hasArg(options::OPT_save_temps))
+ C.CleanupFileList(C.getTempFiles(), true);
+
Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s).";
+ return;
+ }
+
+ const ArgStringList &TempFiles = C.getTempFiles();
+ if (TempFiles.empty()) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s).";
+ return;
+ }
+
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "\n********************\n\n"
- "PLEASE ATTACH THE FOLLOWING FILES TO THE BUG REPORT:\n"
- "Preprocessed source(s) and associated run script(s) are located at:";
- ArgStringList Files = C.getTempFiles();
- for (ArgStringList::const_iterator it = Files.begin(), ie = Files.end();
- it != ie; ++it) {
- Diag(clang::diag::note_drv_command_failed_diag_msg) << *it;
- std::string Script = StringRef(*it).rsplit('.').first;
+ "PLEASE ATTACH THE FOLLOWING FILES TO THE BUG REPORT:\n"
+ "Preprocessed source(s) and associated run script(s) are located at:";
+
+ SmallString<128> VFS;
+ for (const char *TempFile : TempFiles) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << TempFile;
+ if (StringRef(TempFile).endswith(".cache")) {
// In some cases (modules) we'll dump extra data to help with reproducing
// the crash into a directory next to the output.
- SmallString<128> VFS;
- if (llvm::sys::fs::exists(Script + ".cache")) {
- Diag(clang::diag::note_drv_command_failed_diag_msg)
- << Script + ".cache";
- VFS = llvm::sys::path::filename(Script + ".cache");
- llvm::sys::path::append(VFS, "vfs", "vfs.yaml");
- }
-
- std::string Err;
- Script += ".sh";
- llvm::raw_fd_ostream ScriptOS(Script.c_str(), Err, llvm::sys::fs::F_Excl);
- if (!Err.empty()) {
- Diag(clang::diag::note_drv_command_failed_diag_msg)
- << "Error generating run script: " + Script + " " + Err;
- } else {
- // Replace the original filename with the preprocessed one.
- size_t I, E;
- I = Cmd.find("-main-file-name ");
- assert (I != std::string::npos && "Expected to find -main-file-name");
- I += 16;
- E = Cmd.find(" ", I);
- assert (E != std::string::npos && "-main-file-name missing argument?");
- StringRef OldFilename = StringRef(Cmd).slice(I, E);
- StringRef NewFilename = llvm::sys::path::filename(*it);
- I = StringRef(Cmd).rfind(OldFilename);
- E = I + OldFilename.size();
- I = Cmd.rfind(" ", I) + 1;
- Cmd.replace(I, E - I, NewFilename.data(), NewFilename.size());
- if (!VFS.empty()) {
- // Add the VFS overlay to the reproduction script.
- I += NewFilename.size();
- Cmd.insert(I, std::string(" -ivfsoverlay ") + VFS.c_str());
- }
- ScriptOS << Cmd;
- Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
- }
+ VFS = llvm::sys::path::filename(TempFile);
+ llvm::sys::path::append(VFS, "vfs", "vfs.yaml");
}
+ }
+
+ // Assume associated files are based off of the first temporary file.
+ CrashReportInfo CrashInfo(TempFiles[0], VFS);
+
+ std::string Script = CrashInfo.Filename.rsplit('.').first.str() + ".sh";
+ std::error_code EC;
+ llvm::raw_fd_ostream ScriptOS(Script, EC, llvm::sys::fs::F_Excl);
+ if (EC) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
- << "\n\n********************";
+ << "Error generating run script: " + Script + " " + EC.message();
} else {
- // Failure, remove preprocessed files.
- if (!C.getArgs().hasArg(options::OPT_save_temps))
- C.CleanupFileList(C.getTempFiles(), true);
+ Cmd.Print(ScriptOS, "\n", /*Quote=*/true, &CrashInfo);
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
+ }
- Diag(clang::diag::note_drv_command_failed_diag_msg)
- << "Error generating preprocessed source(s).";
+ for (const auto &A : C.getArgs().filtered(options::OPT_frewrite_map_file,
+ options::OPT_frewrite_map_file_EQ))
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << A->getValue();
+
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "\n\n********************";
+}
+
+void Driver::setUpResponseFiles(Compilation &C, Job &J) {
+ if (JobList *Jobs = dyn_cast<JobList>(&J)) {
+ for (auto &Job : *Jobs)
+ setUpResponseFiles(C, Job);
+ return;
}
+
+ Command *CurCommand = dyn_cast<Command>(&J);
+ if (!CurCommand)
+ return;
+
+ // Since argumentsFitWithinSystemLimits() may underestimate system's capacity
+  // if the tool does not support response files, there is a chance that things
+ // will just work without a response file, so we silently just skip it.
+ if (CurCommand->getCreator().getResponseFilesSupport() == Tool::RF_None ||
+ llvm::sys::argumentsFitWithinSystemLimits(CurCommand->getArguments()))
+ return;
+
+ std::string TmpName = GetTemporaryPath("response", "txt");
+ CurCommand->setResponseFile(C.addTempFile(C.getArgs().MakeArgString(
+ TmpName.c_str())));
}
-int Driver::ExecuteCompilation(const Compilation &C,
- SmallVectorImpl< std::pair<int, const Command *> > &FailingCommands) const {
+int Driver::ExecuteCompilation(Compilation &C,
+ SmallVectorImpl< std::pair<int, const Command *> > &FailingCommands) {
// Just print if -### was present.
if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
C.getJobs().Print(llvm::errs(), "\n", true);
@@ -583,6 +592,9 @@ int Driver::ExecuteCompilation(const Compilation &C,
if (Diags.hasErrorOccurred())
return 1;
+ // Set up response file names for each command, if necessary
+ setUpResponseFiles(C, C.getJobs());
+
C.ExecuteJob(C.getJobs(), FailingCommands);
// Remove temp files.
@@ -653,9 +665,13 @@ void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
OS << "Target: " << TC.getTripleString() << '\n';
// Print the threading model.
- //
- // FIXME: Implement correctly.
- OS << "Thread model: " << "posix" << '\n';
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_mthread_model)) {
+ // Don't print if the ToolChain would have barfed on it already
+ if (TC.isThreadModelSupported(A->getValue()))
+ OS << "Thread model: " << A->getValue();
+ } else
+ OS << "Thread model: " << TC.getThreadModel();
+ OS << '\n';
}
/// PrintDiagnosticCategories - Implement the --print-diagnostic-categories
@@ -836,7 +852,9 @@ void Driver::PrintActions(const Compilation &C) const {
/// \brief Check whether the given input tree contains any compilation or
/// assembly actions.
static bool ContainsCompileOrAssembleAction(const Action *A) {
- if (isa<CompileJobAction>(A) || isa<AssembleJobAction>(A))
+ if (isa<CompileJobAction>(A) ||
+ isa<BackendJobAction>(A) ||
+ isa<AssembleJobAction>(A))
return true;
for (Action::const_iterator it = A->begin(), ie = A->end(); it != ie; ++it)
@@ -855,10 +873,7 @@ void Driver::BuildUniversalActions(const ToolChain &TC,
// be handled once (in the order seen).
llvm::StringSet<> ArchNames;
SmallVector<const char *, 4> Archs;
- for (ArgList::const_iterator it = Args.begin(), ie = Args.end();
- it != ie; ++it) {
- Arg *A = *it;
-
+ for (Arg *A : Args) {
if (A->getOption().matches(options::OPT_arch)) {
// Validate the option here; we don't save the type here because its
// particular spelling may participate in other driver choices.
@@ -871,7 +886,7 @@ void Driver::BuildUniversalActions(const ToolChain &TC,
}
A->claim();
- if (ArchNames.insert(A->getValue()))
+ if (ArchNames.insert(A->getValue()).second)
Archs.push_back(A->getValue());
}
}
@@ -901,7 +916,8 @@ void Driver::BuildUniversalActions(const ToolChain &TC,
ActionList Inputs;
for (unsigned i = 0, e = Archs.size(); i != e; ++i) {
- Inputs.push_back(new BindArchAction(Act, Archs[i]));
+ Inputs.push_back(
+ new BindArchAction(std::unique_ptr<Action>(Act), Archs[i]));
if (i != 0)
Inputs.back()->setOwnsInputs(false);
}
@@ -932,9 +948,9 @@ void Driver::BuildUniversalActions(const ToolChain &TC,
// Verify the debug info output.
if (Args.hasArg(options::OPT_verify_debug_info)) {
- Action *VerifyInput = Actions.back();
+ std::unique_ptr<Action> VerifyInput(Actions.back());
Actions.pop_back();
- Actions.push_back(new VerifyDebugInfoJobAction(VerifyInput,
+ Actions.push_back(new VerifyDebugInfoJobAction(std::move(VerifyInput),
types::TY_Nothing));
}
}
@@ -981,8 +997,8 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
Arg *InputTypeArg = nullptr;
// The last /TC or /TP option sets the input type to C or C++ globally.
- if (Arg *TCTP = Args.getLastArg(options::OPT__SLASH_TC,
- options::OPT__SLASH_TP)) {
+ if (Arg *TCTP = Args.getLastArgNoClaim(options::OPT__SLASH_TC,
+ options::OPT__SLASH_TP)) {
InputTypeArg = TCTP;
InputType = TCTP->getOption().matches(options::OPT__SLASH_TC)
? types::TY_C : types::TY_CXX;
@@ -1005,10 +1021,7 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
assert(!Args.hasArg(options::OPT_x) && "-x and /TC or /TP is not allowed");
}
- for (ArgList::const_iterator it = Args.begin(), ie = Args.end();
- it != ie; ++it) {
- Arg *A = *it;
-
+ for (Arg *A : Args) {
if (A->getOption().getKind() == Option::InputClass) {
const char *Value = A->getValue();
types::ID Ty = types::TY_INVALID;
@@ -1070,8 +1083,17 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
}
} else {
assert(InputTypeArg && "InputType set w/o InputTypeArg");
- InputTypeArg->claim();
- Ty = InputType;
+ if (!InputTypeArg->getOption().matches(options::OPT_x)) {
+ // If emulating cl.exe, make sure that /TC and /TP don't affect input
+ // object files.
+ const char *Ext = strrchr(Value, '.');
+ if (Ext && TC.LookupTypeForExtension(Ext + 1) == types::TY_Object)
+ Ty = types::TY_Object;
+ }
+ if (Ty == types::TY_INVALID) {
+ Ty = InputType;
+ InputTypeArg->claim();
+ }
}
if (DiagnoseInputExistence(*this, Args, Value))
@@ -1142,11 +1164,8 @@ void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args,
// Diagnose misuse of /Fo.
if (Arg *A = Args.getLastArg(options::OPT__SLASH_Fo)) {
StringRef V = A->getValue();
- if (V.empty()) {
- // It has to have a value.
- Diag(clang::diag::err_drv_missing_argument) << A->getSpelling() << 1;
- Args.eraseArg(options::OPT__SLASH_Fo);
- } else if (Inputs.size() > 1 && !llvm::sys::path::is_separator(V.back())) {
+ if (Inputs.size() > 1 && !V.empty() &&
+ !llvm::sys::path::is_separator(V.back())) {
// Check whether /Fo tries to name an output file for multiple inputs.
Diag(clang::diag::err_drv_out_file_argument_with_multiple_sources)
<< A->getSpelling() << V;
@@ -1157,7 +1176,8 @@ void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args,
// Diagnose misuse of /Fa.
if (Arg *A = Args.getLastArg(options::OPT__SLASH_Fa)) {
StringRef V = A->getValue();
- if (Inputs.size() > 1 && !llvm::sys::path::is_separator(V.back())) {
+ if (Inputs.size() > 1 && !V.empty() &&
+ !llvm::sys::path::is_separator(V.back())) {
// Check whether /Fa tries to name an asm file for multiple inputs.
Diag(clang::diag::err_drv_out_file_argument_with_multiple_sources)
<< A->getSpelling() << V;
@@ -1165,12 +1185,12 @@ void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args,
}
}
- // Diagnose misuse of /Fe.
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_Fe)) {
+ // Diagnose misuse of /o.
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_o)) {
if (A->getValue()[0] == '\0') {
// It has to have a value.
Diag(clang::diag::err_drv_missing_argument) << A->getSpelling() << 1;
- Args.eraseArg(options::OPT__SLASH_Fe);
+ Args.eraseArg(options::OPT__SLASH_o);
}
}
@@ -1244,7 +1264,7 @@ void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args,
continue;
// Otherwise construct the appropriate action.
- Current.reset(ConstructPhaseAction(Args, Phase, Current.release()));
+ Current = ConstructPhaseAction(Args, Phase, std::move(Current));
if (Current->getType() == types::TY_Nothing)
break;
}
@@ -1269,8 +1289,9 @@ void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args,
Args.ClaimAllArgs(options::OPT_cl_ignored_Group);
}
-Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
- Action *Input) const {
+std::unique_ptr<Action>
+Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
+ std::unique_ptr<Action> Input) const {
llvm::PrettyStackTraceString CrashInfo("Constructing phase actions");
// Build the appropriate action.
switch (Phase) {
@@ -1289,7 +1310,7 @@ Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
assert(OutputTy != types::TY_INVALID &&
"Cannot preprocess this input type!");
}
- return new PreprocessJobAction(Input, OutputTy);
+ return llvm::make_unique<PreprocessJobAction>(std::move(Input), OutputTy);
}
case phases::Precompile: {
types::ID OutputTy = types::TY_PCH;
@@ -1297,39 +1318,53 @@ Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
// Syntax checks should not emit a PCH file
OutputTy = types::TY_Nothing;
}
- return new PrecompileJobAction(Input, OutputTy);
+ return llvm::make_unique<PrecompileJobAction>(std::move(Input), OutputTy);
}
case phases::Compile: {
- if (Args.hasArg(options::OPT_fsyntax_only)) {
- return new CompileJobAction(Input, types::TY_Nothing);
- } else if (Args.hasArg(options::OPT_rewrite_objc)) {
- return new CompileJobAction(Input, types::TY_RewrittenObjC);
- } else if (Args.hasArg(options::OPT_rewrite_legacy_objc)) {
- return new CompileJobAction(Input, types::TY_RewrittenLegacyObjC);
- } else if (Args.hasArg(options::OPT__analyze, options::OPT__analyze_auto)) {
- return new AnalyzeJobAction(Input, types::TY_Plist);
- } else if (Args.hasArg(options::OPT__migrate)) {
- return new MigrateJobAction(Input, types::TY_Remap);
- } else if (Args.hasArg(options::OPT_emit_ast)) {
- return new CompileJobAction(Input, types::TY_AST);
- } else if (Args.hasArg(options::OPT_module_file_info)) {
- return new CompileJobAction(Input, types::TY_ModuleFile);
- } else if (Args.hasArg(options::OPT_verify_pch)) {
- return new VerifyPCHJobAction(Input, types::TY_Nothing);
- } else if (IsUsingLTO(Args)) {
+ if (Args.hasArg(options::OPT_fsyntax_only))
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_Nothing);
+ if (Args.hasArg(options::OPT_rewrite_objc))
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_RewrittenObjC);
+ if (Args.hasArg(options::OPT_rewrite_legacy_objc))
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_RewrittenLegacyObjC);
+ if (Args.hasArg(options::OPT__analyze, options::OPT__analyze_auto))
+ return llvm::make_unique<AnalyzeJobAction>(std::move(Input),
+ types::TY_Plist);
+ if (Args.hasArg(options::OPT__migrate))
+ return llvm::make_unique<MigrateJobAction>(std::move(Input),
+ types::TY_Remap);
+ if (Args.hasArg(options::OPT_emit_ast))
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_AST);
+ if (Args.hasArg(options::OPT_module_file_info))
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_ModuleFile);
+ if (Args.hasArg(options::OPT_verify_pch))
+ return llvm::make_unique<VerifyPCHJobAction>(std::move(Input),
+ types::TY_Nothing);
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_LLVM_BC);
+ }
+ case phases::Backend: {
+ if (IsUsingLTO(Args)) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
- return new CompileJobAction(Input, Output);
- } else if (Args.hasArg(options::OPT_emit_llvm)) {
+ return llvm::make_unique<BackendJobAction>(std::move(Input), Output);
+ }
+ if (Args.hasArg(options::OPT_emit_llvm)) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LLVM_IR : types::TY_LLVM_BC;
- return new CompileJobAction(Input, Output);
- } else {
- return new CompileJobAction(Input, types::TY_PP_Asm);
+ return llvm::make_unique<BackendJobAction>(std::move(Input), Output);
}
+ return llvm::make_unique<BackendJobAction>(std::move(Input),
+ types::TY_PP_Asm);
}
case phases::Assemble:
- return new AssembleJobAction(Input, types::TY_Object);
+ return llvm::make_unique<AssembleJobAction>(std::move(Input),
+ types::TY_Object);
}
llvm_unreachable("invalid phase in ConstructPhaseAction");
@@ -1351,9 +1386,8 @@ void Driver::BuildJobs(Compilation &C) const {
// files.
if (FinalOutput) {
unsigned NumOutputs = 0;
- for (ActionList::const_iterator it = C.getActions().begin(),
- ie = C.getActions().end(); it != ie; ++it)
- if ((*it)->getType() != types::TY_Nothing)
+ for (const Action *A : C.getActions())
+ if (A->getType() != types::TY_Nothing)
++NumOutputs;
if (NumOutputs > 1) {
@@ -1364,19 +1398,12 @@ void Driver::BuildJobs(Compilation &C) const {
// Collect the list of architectures.
llvm::StringSet<> ArchNames;
- if (C.getDefaultToolChain().getTriple().isOSBinFormatMachO()) {
- for (ArgList::const_iterator it = C.getArgs().begin(), ie = C.getArgs().end();
- it != ie; ++it) {
- Arg *A = *it;
+ if (C.getDefaultToolChain().getTriple().isOSBinFormatMachO())
+ for (const Arg *A : C.getArgs())
if (A->getOption().matches(options::OPT_arch))
ArchNames.insert(A->getValue());
- }
- }
-
- for (ActionList::const_iterator it = C.getActions().begin(),
- ie = C.getActions().end(); it != ie; ++it) {
- Action *A = *it;
+ for (Action *A : C.getActions()) {
// If we are linking an image for multiple archs then the linker wants
// -arch_multiple and -final_output <final image name>. Unfortunately, this
// doesn't fit in cleanly because we have to pass this information down.
@@ -1388,7 +1415,7 @@ void Driver::BuildJobs(Compilation &C) const {
if (FinalOutput)
LinkingOutput = FinalOutput->getValue();
else
- LinkingOutput = DefaultImageName.c_str();
+ LinkingOutput = getDefaultImageName();
}
InputInfo II;
@@ -1412,10 +1439,7 @@ void Driver::BuildJobs(Compilation &C) const {
// Claim --driver-mode, it was handled earlier.
(void) C.getArgs().hasArg(options::OPT_driver_mode);
- for (ArgList::const_iterator it = C.getArgs().begin(), ie = C.getArgs().end();
- it != ie; ++it) {
- Arg *A = *it;
-
+ for (Arg *A : C.getArgs()) {
// FIXME: It would be nice to be able to send the argument to the
// DiagnosticsEngine, so that extra values, position, and so on could be
// printed.
@@ -1462,12 +1486,34 @@ static const Tool *SelectToolForJob(Compilation &C, const ToolChain *TC,
!C.getArgs().hasArg(options::OPT__SLASH_FA) &&
!C.getArgs().hasArg(options::OPT__SLASH_Fa) &&
isa<AssembleJobAction>(JA) &&
- Inputs->size() == 1 && isa<CompileJobAction>(*Inputs->begin())) {
- const Tool *Compiler =
- TC->SelectTool(cast<JobAction>(**Inputs->begin()));
+ Inputs->size() == 1 && isa<BackendJobAction>(*Inputs->begin())) {
+ // A BackendJob is always preceded by a CompileJob, and without
+ // -save-temps they will always get combined together, so instead of
+ // checking the backend tool, check if the tool for the CompileJob
+ // has an integrated assembler.
+ const ActionList *BackendInputs = &(*Inputs)[0]->getInputs();
+ JobAction *CompileJA = cast<CompileJobAction>(*BackendInputs->begin());
+ const Tool *Compiler = TC->SelectTool(*CompileJA);
if (!Compiler)
return nullptr;
if (Compiler->hasIntegratedAssembler()) {
+ Inputs = &(*BackendInputs)[0]->getInputs();
+ ToolForJob = Compiler;
+ }
+ }
+
+ // A backend job should always be combined with the preceding compile job
+ // unless OPT_save_temps is enabled and the compiler is capable of emitting
+ // LLVM IR as an intermediate output.
+ if (isa<BackendJobAction>(JA)) {
+ // Check if the compiler supports emitting LLVM IR.
+ assert(Inputs->size() == 1);
+ JobAction *CompileJA = cast<CompileJobAction>(*Inputs->begin());
+ const Tool *Compiler = TC->SelectTool(*CompileJA);
+ if (!Compiler)
+ return nullptr;
+ if (!Compiler->canEmitIR() ||
+ !C.getArgs().hasArg(options::OPT_save_temps)) {
Inputs = &(*Inputs)[0]->getInputs();
ToolForJob = Compiler;
}
@@ -1537,8 +1583,7 @@ void Driver::BuildJobsForAction(Compilation &C,
// Only use pipes when there is exactly one input.
InputInfoList InputInfos;
- for (ActionList::const_iterator it = Inputs->begin(), ie = Inputs->end();
- it != ie; ++it) {
+ for (const Action *Input : *Inputs) {
// Treat dsymutil and verify sub-jobs as being at the top-level too, they
// shouldn't get temporary output names.
// FIXME: Clean this up.
@@ -1547,7 +1592,7 @@ void Driver::BuildJobsForAction(Compilation &C,
SubJobAtTopLevel = true;
InputInfo II;
- BuildJobsForAction(C, *it, TC, BoundArch, SubJobAtTopLevel, MultipleArchs,
+ BuildJobsForAction(C, Input, TC, BoundArch, SubJobAtTopLevel, MultipleArchs,
LinkingOutput, II);
InputInfos.push_back(II);
}
@@ -1583,6 +1628,11 @@ void Driver::BuildJobsForAction(Compilation &C,
}
}
+const char *Driver::getDefaultImageName() const {
+ llvm::Triple Target(llvm::Triple::normalize(DefaultTargetTriple));
+ return Target.isOSWindows() ? "a.exe" : "a.out";
+}
+
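Driver::getDefaultImageName() above replaces the fixed DefaultImageName member: the default output name is now derived from the normalized target triple, giving a.exe on Windows targets and a.out elsewhere. A standalone sketch of the same Triple query, not part of the patch; the triples below are only examples.

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"

static const char *defaultImageName(llvm::StringRef TripleStr) {
  llvm::Triple T(llvm::Triple::normalize(TripleStr));
  return T.isOSWindows() ? "a.exe" : "a.out";
}

int main() {
  llvm::outs() << defaultImageName("x86_64-pc-windows-msvc") << '\n';   // a.exe
  llvm::outs() << defaultImageName("x86_64-unknown-linux-gnu") << '\n'; // a.out
  return 0;
}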
/// \brief Create output filename based on ArgValue, which could either be a
/// full filename, filename without extension, or a directory. If ArgValue
/// does not provide a filename, then use BaseName, and use the extension
@@ -1634,7 +1684,8 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
assert(AtTopLevel && isa<PreprocessJobAction>(JA));
StringRef BaseName = llvm::sys::path::filename(BaseInput);
StringRef NameArg;
- if (Arg *A = C.getArgs().getLastArg(options::OPT__SLASH_Fi))
+ if (Arg *A = C.getArgs().getLastArg(options::OPT__SLASH_Fi,
+ options::OPT__SLASH_o))
NameArg = A->getValue();
return C.addResultFile(MakeCLOutputFilename(C.getArgs(), NameArg, BaseName,
types::TY_PP_C), &JA);
@@ -1681,15 +1732,17 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
const char *NamedOutput;
if (JA.getType() == types::TY_Object &&
- C.getArgs().hasArg(options::OPT__SLASH_Fo)) {
- // The /Fo flag decides the object filename.
- StringRef Val = C.getArgs().getLastArg(options::OPT__SLASH_Fo)->getValue();
+ C.getArgs().hasArg(options::OPT__SLASH_Fo, options::OPT__SLASH_o)) {
+ // The /Fo or /o flag decides the object filename.
+ StringRef Val = C.getArgs().getLastArg(options::OPT__SLASH_Fo,
+ options::OPT__SLASH_o)->getValue();
NamedOutput = MakeCLOutputFilename(C.getArgs(), Val, BaseName,
types::TY_Object);
} else if (JA.getType() == types::TY_Image &&
- C.getArgs().hasArg(options::OPT__SLASH_Fe)) {
- // The /Fe flag names the linked file.
- StringRef Val = C.getArgs().getLastArg(options::OPT__SLASH_Fe)->getValue();
+ C.getArgs().hasArg(options::OPT__SLASH_Fe, options::OPT__SLASH_o)) {
+ // The /Fe or /o flag names the linked file.
+ StringRef Val = C.getArgs().getLastArg(options::OPT__SLASH_Fe,
+ options::OPT__SLASH_o)->getValue();
NamedOutput = MakeCLOutputFilename(C.getArgs(), Val, BaseName,
types::TY_Image);
} else if (JA.getType() == types::TY_Image) {
@@ -1698,12 +1751,12 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
NamedOutput = MakeCLOutputFilename(C.getArgs(), "", BaseName,
types::TY_Image);
} else if (MultipleArchs && BoundArch) {
- SmallString<128> Output(DefaultImageName.c_str());
+ SmallString<128> Output(getDefaultImageName());
Output += "-";
Output.append(BoundArch);
NamedOutput = C.getArgs().MakeArgString(Output.c_str());
} else
- NamedOutput = DefaultImageName.c_str();
+ NamedOutput = getDefaultImageName();
} else {
const char *Suffix = types::getTypeTempSuffix(JA.getType(), IsCLMode());
assert(Suffix && "All types used for output should have a suffix.");
@@ -1716,6 +1769,12 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
Suffixed += "-";
Suffixed.append(BoundArch);
}
+ // When using both -save-temps and -emit-llvm, use a ".tmp.bc" suffix for
+ // the unoptimized bitcode so that it does not get overwritten by the ".bc"
+ // optimized bitcode output.
+ if (!AtTopLevel && C.getArgs().hasArg(options::OPT_emit_llvm) &&
+ JA.getType() == types::TY_LLVM_BC)
+ Suffixed += ".tmp";
Suffixed += '.';
Suffixed += Suffix;
NamedOutput = C.getArgs().MakeArgString(Suffixed.c_str());
@@ -1793,51 +1852,56 @@ std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
return Name;
}
+void
+Driver::generatePrefixedToolNames(const char *Tool, const ToolChain &TC,
+ SmallVectorImpl<std::string> &Names) const {
+ // FIXME: Needs a better variable than DefaultTargetTriple
+ Names.push_back(DefaultTargetTriple + "-" + Tool);
+ Names.push_back(Tool);
+}
+
+static bool ScanDirForExecutable(SmallString<128> &Dir,
+ ArrayRef<std::string> Names) {
+ for (const auto &Name : Names) {
+ llvm::sys::path::append(Dir, Name);
+ if (llvm::sys::fs::can_execute(Twine(Dir)))
+ return true;
+ llvm::sys::path::remove_filename(Dir);
+ }
+ return false;
+}
+
std::string Driver::GetProgramPath(const char *Name,
const ToolChain &TC) const {
- // FIXME: Needs a better variable than DefaultTargetTriple
- std::string TargetSpecificExecutable(DefaultTargetTriple + "-" + Name);
+ SmallVector<std::string, 2> TargetSpecificExecutables;
+ generatePrefixedToolNames(Name, TC, TargetSpecificExecutables);
+
// Respect a limited subset of the '-Bprefix' functionality in GCC by
// attempting to use this prefix when looking for program paths.
- for (Driver::prefix_list::const_iterator it = PrefixDirs.begin(),
- ie = PrefixDirs.end(); it != ie; ++it) {
- if (llvm::sys::fs::is_directory(*it)) {
- SmallString<128> P(*it);
- llvm::sys::path::append(P, TargetSpecificExecutable);
- if (llvm::sys::fs::can_execute(Twine(P)))
- return P.str();
- llvm::sys::path::remove_filename(P);
- llvm::sys::path::append(P, Name);
- if (llvm::sys::fs::can_execute(Twine(P)))
+ for (const auto &PrefixDir : PrefixDirs) {
+ if (llvm::sys::fs::is_directory(PrefixDir)) {
+ SmallString<128> P(PrefixDir);
+ if (ScanDirForExecutable(P, TargetSpecificExecutables))
return P.str();
} else {
- SmallString<128> P(*it + Name);
+ SmallString<128> P(PrefixDir + Name);
if (llvm::sys::fs::can_execute(Twine(P)))
return P.str();
}
}
const ToolChain::path_list &List = TC.getProgramPaths();
- for (ToolChain::path_list::const_iterator
- it = List.begin(), ie = List.end(); it != ie; ++it) {
- SmallString<128> P(*it);
- llvm::sys::path::append(P, TargetSpecificExecutable);
- if (llvm::sys::fs::can_execute(Twine(P)))
- return P.str();
- llvm::sys::path::remove_filename(P);
- llvm::sys::path::append(P, Name);
- if (llvm::sys::fs::can_execute(Twine(P)))
+ for (const auto &Path : List) {
+ SmallString<128> P(Path);
+ if (ScanDirForExecutable(P, TargetSpecificExecutables))
return P.str();
}
// If all else failed, search the path.
- std::string P(llvm::sys::FindProgramByName(TargetSpecificExecutable));
- if (!P.empty())
- return P;
-
- P = llvm::sys::FindProgramByName(Name);
- if (!P.empty())
- return P;
+ for (const auto &TargetSpecificExecutable : TargetSpecificExecutables)
+ if (llvm::ErrorOr<std::string> P =
+ llvm::sys::findProgramByName(TargetSpecificExecutable))
+ return *P;
return Name;
}
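GetProgramPath above now builds the candidate tool names once (triple-prefixed first, bare name second) and probes each directory with the new ScanDirForExecutable helper. Below is a standalone sketch of that probe reusing the llvm::sys calls from the hunk; it is illustrative only, and the directory and tool names are examples.

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
#include <vector>

static bool scanDirForExecutable(llvm::SmallString<128> &Dir,
                                 const std::vector<std::string> &Names) {
  for (const std::string &Name : Names) {
    llvm::sys::path::append(Dir, Name);            // Dir/Name
    if (llvm::sys::fs::can_execute(llvm::Twine(Dir)))
      return true;                                 // Dir now holds the full path
    llvm::sys::path::remove_filename(Dir);         // restore Dir for the next try
  }
  return false;
}

int main() {
  llvm::SmallString<128> Dir("/usr/bin");
  std::vector<std::string> Names = {"x86_64-unknown-linux-gnu-ld", "ld"};
  if (scanDirForExecutable(Dir, Names))
    llvm::outs() << "found: " << Dir.str() << '\n';
  else
    llvm::outs() << "not found\n";
  return 0;
}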
@@ -1893,8 +1957,6 @@ static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple,
Target.setArch(llvm::Triple::mips64el);
else if (Target.getArch() == llvm::Triple::aarch64_be)
Target.setArch(llvm::Triple::aarch64);
- else if (Target.getArch() == llvm::Triple::arm64_be)
- Target.setArch(llvm::Triple::arm64);
} else {
if (Target.getArch() == llvm::Triple::mipsel)
Target.setArch(llvm::Triple::mips);
@@ -1902,15 +1964,11 @@ static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple,
Target.setArch(llvm::Triple::mips64);
else if (Target.getArch() == llvm::Triple::aarch64)
Target.setArch(llvm::Triple::aarch64_be);
- else if (Target.getArch() == llvm::Triple::arm64)
- Target.setArch(llvm::Triple::arm64_be);
}
}
// Skip further flag support on OSes which don't support '-m32' or '-m64'.
- if (Target.getArchName() == "tce" ||
- Target.getOS() == llvm::Triple::AuroraUX ||
- Target.getOS() == llvm::Triple::Minix)
+ if (Target.getArchName() == "tce" || Target.getOS() == llvm::Triple::Minix)
return Target;
// Handle pseudo-target flags '-m64', '-mx32', '-m32' and '-m16'.
@@ -1918,21 +1976,25 @@ static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple,
options::OPT_m32, options::OPT_m16)) {
llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;
- if (A->getOption().matches(options::OPT_m64))
+ if (A->getOption().matches(options::OPT_m64)) {
AT = Target.get64BitArchVariant().getArch();
- else if (A->getOption().matches(options::OPT_mx32) &&
+ if (Target.getEnvironment() == llvm::Triple::GNUX32)
+ Target.setEnvironment(llvm::Triple::GNU);
+ } else if (A->getOption().matches(options::OPT_mx32) &&
Target.get64BitArchVariant().getArch() == llvm::Triple::x86_64) {
AT = llvm::Triple::x86_64;
Target.setEnvironment(llvm::Triple::GNUX32);
- } else if (A->getOption().matches(options::OPT_m32))
+ } else if (A->getOption().matches(options::OPT_m32)) {
AT = Target.get32BitArchVariant().getArch();
- else if (A->getOption().matches(options::OPT_m16) &&
+ if (Target.getEnvironment() == llvm::Triple::GNUX32)
+ Target.setEnvironment(llvm::Triple::GNU);
+ } else if (A->getOption().matches(options::OPT_m16) &&
Target.get32BitArchVariant().getArch() == llvm::Triple::x86) {
AT = llvm::Triple::x86;
Target.setEnvironment(llvm::Triple::CODE16);
}
- if (AT != llvm::Triple::UnknownArch)
+ if (AT != llvm::Triple::UnknownArch && AT != Target.getArch())
Target.setArch(AT);
}
@@ -1947,9 +2009,6 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
ToolChain *&TC = ToolChains[Target.str()];
if (!TC) {
switch (Target.getOS()) {
- case llvm::Triple::AuroraUX:
- TC = new toolchains::AuroraUX(*this, Target, Args);
- break;
case llvm::Triple::Darwin:
case llvm::Triple::MacOSX:
case llvm::Triple::IOS:
@@ -2000,9 +2059,12 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
else
TC = new toolchains::Generic_GCC(*this, Target, Args);
break;
+ case llvm::Triple::Itanium:
+ TC = new toolchains::CrossWindowsToolChain(*this, Target, Args);
+ break;
case llvm::Triple::MSVC:
case llvm::Triple::UnknownEnvironment:
- TC = new toolchains::Windows(*this, Target, Args);
+ TC = new toolchains::MSVCToolChain(*this, Target, Args);
break;
}
break;
@@ -2025,7 +2087,7 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
TC = new toolchains::Generic_ELF(*this, Target, Args);
break;
}
- if (Target.getObjectFormat() == llvm::Triple::MachO) {
+ if (Target.isOSBinFormatMachO()) {
TC = new toolchains::MachO(*this, Target, Args);
break;
}
@@ -2045,7 +2107,7 @@ bool Driver::ShouldUseClangCompiler(const JobAction &JA) const {
// Otherwise make sure this is an action clang understands.
if (!isa<PreprocessJobAction>(JA) && !isa<PrecompileJobAction>(JA) &&
- !isa<CompileJobAction>(JA))
+ !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA))
return false;
return true;
diff --git a/lib/Driver/InputInfo.h b/lib/Driver/InputInfo.h
index 4eedd22a62da..b23ba575b65e 100644
--- a/lib/Driver/InputInfo.h
+++ b/lib/Driver/InputInfo.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_LIB_DRIVER_INPUTINFO_H_
-#define CLANG_LIB_DRIVER_INPUTINFO_H_
+#ifndef LLVM_CLANG_LIB_DRIVER_INPUTINFO_H
+#define LLVM_CLANG_LIB_DRIVER_INPUTINFO_H
#include "clang/Driver/Types.h"
#include "llvm/Option/Arg.h"
diff --git a/lib/Driver/Job.cpp b/lib/Driver/Job.cpp
index 42cc1bc29077..c5b3f5a307b9 100644
--- a/lib/Driver/Job.cpp
+++ b/lib/Driver/Job.cpp
@@ -12,8 +12,10 @@
#include "clang/Driver/Job.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
@@ -21,6 +23,7 @@
using namespace clang::driver;
using llvm::raw_ostream;
using llvm::StringRef;
+using llvm::ArrayRef;
Job::~Job() {}
@@ -28,7 +31,8 @@ Command::Command(const Action &_Source, const Tool &_Creator,
const char *_Executable,
const ArgStringList &_Arguments)
: Job(CommandClass), Source(_Source), Creator(_Creator),
- Executable(_Executable), Arguments(_Arguments) {}
+ Executable(_Executable), Arguments(_Arguments),
+ ResponseFile(nullptr) {}
static int skipArgs(const char *Flag) {
// These flags are all of the form -Flag <Arg> and are treated as two
@@ -69,12 +73,6 @@ static int skipArgs(const char *Flag) {
return 0;
}
-static bool quoteNextArg(const char *flag) {
- return llvm::StringSwitch<bool>(flag)
- .Case("-D", true)
- .Default(false);
-}
-
static void PrintArg(raw_ostream &OS, const char *Arg, bool Quote) {
const bool Escape = std::strpbrk(Arg, "\"\\$");
@@ -93,38 +91,162 @@ static void PrintArg(raw_ostream &OS, const char *Arg, bool Quote) {
OS << '"';
}
+void Command::writeResponseFile(raw_ostream &OS) const {
+ // In a file list, we only write the set of inputs to the response file
+ if (Creator.getResponseFilesSupport() == Tool::RF_FileList) {
+ for (const char *Arg : InputFileList) {
+ OS << Arg << '\n';
+ }
+ return;
+ }
+
+ // In regular response files, we send all arguments to the response file
+ for (const char *Arg : Arguments) {
+ OS << '"';
+
+ for (; *Arg != '\0'; Arg++) {
+ if (*Arg == '\"' || *Arg == '\\') {
+ OS << '\\';
+ }
+ OS << *Arg;
+ }
+
+ OS << "\" ";
+ }
+}
+
+void Command::buildArgvForResponseFile(
+ llvm::SmallVectorImpl<const char *> &Out) const {
+ // When not a file list, all arguments are sent to the response file.
+ // This leaves us to set the argv to a single parameter, requesting the tool
+ // to read the response file.
+ if (Creator.getResponseFilesSupport() != Tool::RF_FileList) {
+ Out.push_back(Executable);
+ Out.push_back(ResponseFileFlag.c_str());
+ return;
+ }
+
+ llvm::StringSet<> Inputs;
+ for (const char *InputName : InputFileList)
+ Inputs.insert(InputName);
+ Out.push_back(Executable);
+ // In a file list, build args vector ignoring parameters that will go in the
+ // response file (elements of the InputFileList vector)
+ bool FirstInput = true;
+ for (const char *Arg : Arguments) {
+ if (Inputs.count(Arg) == 0) {
+ Out.push_back(Arg);
+ } else if (FirstInput) {
+ FirstInput = false;
+ Out.push_back(Creator.getResponseFileFlag());
+ Out.push_back(ResponseFile);
+ }
+ }
+}
+
void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
- bool CrashReport) const {
- OS << " \"" << Executable << '"';
+ CrashReportInfo *CrashInfo) const {
+ // Always quote the exe.
+ OS << ' ';
+ PrintArg(OS, Executable, /*Quote=*/true);
+
+ llvm::ArrayRef<const char *> Args = Arguments;
+ llvm::SmallVector<const char *, 128> ArgsRespFile;
+ if (ResponseFile != nullptr) {
+ buildArgvForResponseFile(ArgsRespFile);
+ Args = ArrayRef<const char *>(ArgsRespFile).slice(1); // no executable name
+ }
- for (size_t i = 0, e = Arguments.size(); i < e; ++i) {
- const char *const Arg = Arguments[i];
+ StringRef MainFilename;
+ // We'll need the argument to -main-file-name to find the input file name.
+ if (CrashInfo)
+ for (size_t I = 0, E = Args.size(); I + 1 < E; ++I)
+ if (StringRef(Args[I]).equals("-main-file-name"))
+ MainFilename = Args[I + 1];
- if (CrashReport) {
+ for (size_t i = 0, e = Args.size(); i < e; ++i) {
+ const char *const Arg = Args[i];
+
+ if (CrashInfo) {
if (int Skip = skipArgs(Arg)) {
i += Skip - 1;
continue;
+ } else if (llvm::sys::path::filename(Arg) == MainFilename &&
+ (i == 0 || StringRef(Args[i - 1]) != "-main-file-name")) {
+ // Replace the input file name with the crashinfo's file name.
+ OS << ' ';
+ StringRef ShortName = llvm::sys::path::filename(CrashInfo->Filename);
+ PrintArg(OS, ShortName.str().c_str(), Quote);
+ continue;
}
}
OS << ' ';
PrintArg(OS, Arg, Quote);
+ }
- if (CrashReport && quoteNextArg(Arg) && i + 1 < e) {
- OS << ' ';
- PrintArg(OS, Arguments[++i], true);
- }
+ if (CrashInfo && !CrashInfo->VFSPath.empty()) {
+ OS << ' ';
+ PrintArg(OS, "-ivfsoverlay", Quote);
+ OS << ' ';
+ PrintArg(OS, CrashInfo->VFSPath.str().c_str(), Quote);
}
+
+ if (ResponseFile != nullptr) {
+ OS << "\n Arguments passed via response file:\n";
+ writeResponseFile(OS);
+ // Avoiding duplicated newline terminator, since FileLists are
+ // newline-separated.
+ if (Creator.getResponseFilesSupport() != Tool::RF_FileList)
+ OS << "\n";
+ OS << " (end of response file)";
+ }
+
OS << Terminator;
}
+void Command::setResponseFile(const char *FileName) {
+ ResponseFile = FileName;
+ ResponseFileFlag = Creator.getResponseFileFlag();
+ ResponseFileFlag += FileName;
+}
+
int Command::Execute(const StringRef **Redirects, std::string *ErrMsg,
bool *ExecutionFailed) const {
SmallVector<const char*, 128> Argv;
- Argv.push_back(Executable);
- for (size_t i = 0, e = Arguments.size(); i != e; ++i)
- Argv.push_back(Arguments[i]);
+
+ if (ResponseFile == nullptr) {
+ Argv.push_back(Executable);
+ for (size_t i = 0, e = Arguments.size(); i != e; ++i)
+ Argv.push_back(Arguments[i]);
+ Argv.push_back(nullptr);
+
+ return llvm::sys::ExecuteAndWait(Executable, Argv.data(), /*env*/ nullptr,
+ Redirects, /*secondsToWait*/ 0,
+ /*memoryLimit*/ 0, ErrMsg,
+ ExecutionFailed);
+ }
+
+ // We need to put arguments in a response file (command is too large)
+ // Open stream to store the response file contents
+ std::string RespContents;
+ llvm::raw_string_ostream SS(RespContents);
+
+ // Write file contents and build the Argv vector
+ writeResponseFile(SS);
+ buildArgvForResponseFile(Argv);
Argv.push_back(nullptr);
+ SS.flush();
+
+ // Save the response file in the appropriate encoding
+ if (std::error_code EC = writeFileWithEncoding(
+ ResponseFile, RespContents, Creator.getResponseFileEncoding())) {
+ if (ErrMsg)
+ *ErrMsg = EC.message();
+ if (ExecutionFailed)
+ *ExecutionFailed = true;
+ return -1;
+ }
return llvm::sys::ExecuteAndWait(Executable, Argv.data(), /*env*/ nullptr,
Redirects, /*secondsToWait*/ 0,
@@ -134,15 +256,15 @@ int Command::Execute(const StringRef **Redirects, std::string *ErrMsg,
FallbackCommand::FallbackCommand(const Action &Source_, const Tool &Creator_,
const char *Executable_,
const ArgStringList &Arguments_,
- Command *Fallback_)
- : Command(Source_, Creator_, Executable_, Arguments_), Fallback(Fallback_) {
-}
+ std::unique_ptr<Command> Fallback_)
+ : Command(Source_, Creator_, Executable_, Arguments_),
+ Fallback(std::move(Fallback_)) {}
void FallbackCommand::Print(raw_ostream &OS, const char *Terminator,
- bool Quote, bool CrashReport) const {
- Command::Print(OS, "", Quote, CrashReport);
+ bool Quote, CrashReportInfo *CrashInfo) const {
+ Command::Print(OS, "", Quote, CrashInfo);
OS << " ||";
- Fallback->Print(OS, Terminator, Quote, CrashReport);
+ Fallback->Print(OS, Terminator, Quote, CrashInfo);
}
static bool ShouldFallback(int ExitCode) {
@@ -173,17 +295,10 @@ int FallbackCommand::Execute(const StringRef **Redirects, std::string *ErrMsg,
JobList::JobList() : Job(JobListClass) {}
-JobList::~JobList() {
- for (iterator it = begin(), ie = end(); it != ie; ++it)
- delete *it;
-}
-
void JobList::Print(raw_ostream &OS, const char *Terminator, bool Quote,
- bool CrashReport) const {
- for (const_iterator it = begin(), ie = end(); it != ie; ++it)
- (*it)->Print(OS, Terminator, Quote, CrashReport);
+ CrashReportInfo *CrashInfo) const {
+ for (const auto &Job : *this)
+ Job.Print(OS, Terminator, Quote, CrashInfo);
}
-void JobList::clear() {
- DeleteContainerPointers(Jobs);
-}
+void JobList::clear() { Jobs.clear(); }
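
The response-file support added above boils down to two steps: escape and write every argument (or, for file lists, only the inputs) into a side file, then invoke the tool with the flag returned by Tool::getResponseFileFlag() plus the file name. A minimal sketch of the non-file-list case, outside the driver, might look like the following; the "@" prefix and file name are illustrative, and process execution is omitted.

#include <fstream>
#include <string>
#include <vector>

// Arguments go into the response file with '"' and '\' escaped, mirroring
// Command::writeResponseFile above.
static void writeResponseFile(const std::string &Path,
                              const std::vector<std::string> &Args) {
  std::ofstream OS(Path);
  for (const std::string &Arg : Args) {
    OS << '"';
    for (char C : Arg) {
      if (C == '"' || C == '\\')
        OS << '\\';
      OS << C;
    }
    OS << "\" ";
  }
}

// The process then receives only the executable and a single "@<file>"
// argument, standing in for the tool's real response-file flag.
static std::vector<std::string>
buildArgv(const std::string &Exe, const std::string &RespPath) {
  return {Exe, "@" + RespPath};
}

int main() {
  writeResponseFile("args.rsp", {"-DNAME=\"quoted\"", "a.o", "b.o"});
  std::vector<std::string> Argv = buildArgv("link.exe", "args.rsp");
  (void)Argv; // would be handed to the process-execution API
}
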
diff --git a/lib/Driver/MSVCToolChain.cpp b/lib/Driver/MSVCToolChain.cpp
new file mode 100644
index 000000000000..d6bd5c31ae6e
--- /dev/null
+++ b/lib/Driver/MSVCToolChain.cpp
@@ -0,0 +1,496 @@
+//===--- MSVCToolChain.cpp - MSVC ToolChain Implementation ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ToolChains.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Version.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Process.h"
+
+// Include the necessary headers to interface with the Windows registry and
+// environment.
+#if defined(LLVM_ON_WIN32)
+#define USE_WIN32
+#endif
+
+#ifdef USE_WIN32
+ #define WIN32_LEAN_AND_MEAN
+ #define NOGDI
+ #ifndef NOMINMAX
+ #define NOMINMAX
+ #endif
+ #include <windows.h>
+#endif
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+using namespace llvm::opt;
+
+MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple& Triple,
+ const ArgList &Args)
+ : ToolChain(D, Triple, Args) {
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+}
+
+Tool *MSVCToolChain::buildLinker() const {
+ return new tools::visualstudio::Link(*this);
+}
+
+Tool *MSVCToolChain::buildAssembler() const {
+ if (getTriple().isOSBinFormatMachO())
+ return new tools::darwin::Assemble(*this);
+ getDriver().Diag(clang::diag::err_no_external_assembler);
+ return nullptr;
+}
+
+bool MSVCToolChain::IsIntegratedAssemblerDefault() const {
+ return true;
+}
+
+bool MSVCToolChain::IsUnwindTablesDefault() const {
+ // Emit unwind tables by default on Win64. All non-x86_32 Windows platforms
+ // such as ARM and PPC actually require unwind tables, but LLVM doesn't know
+ // how to generate them yet.
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool MSVCToolChain::isPICDefault() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool MSVCToolChain::isPIEDefault() const {
+ return false;
+}
+
+bool MSVCToolChain::isPICDefaultForced() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+#ifdef USE_WIN32
+static bool readFullStringValue(HKEY hkey, const char *valueName,
+ std::string &value) {
+ // FIXME: We should be using the W versions of the registry functions, but
+ // doing so requires UTF8 / UTF16 conversions similar to how we handle command
+ // line arguments. The UTF8 conversion functions are not exposed publicly
+ // from LLVM though, so in order to do this we will probably need to create
+ // a registry abstraction in LLVMSupport that is Windows only.
+ DWORD result = 0;
+ DWORD valueSize = 0;
+ DWORD type = 0;
+ // First just query for the required size.
+ result = RegQueryValueEx(hkey, valueName, NULL, &type, NULL, &valueSize);
+ if (result != ERROR_SUCCESS || type != REG_SZ)
+ return false;
+ std::vector<BYTE> buffer(valueSize);
+ result = RegQueryValueEx(hkey, valueName, NULL, NULL, &buffer[0], &valueSize);
+ if (result == ERROR_SUCCESS)
+ value.assign(reinterpret_cast<const char *>(buffer.data()));
+ return result;
+}
+#endif
+
+/// \brief Read registry string.
+/// This also supports a means to look for high-versioned keys by use
+/// of a $VERSION placeholder in the key path: $VERSION stands for the
+/// version number, and the highest-versioned matching path is searched
+/// for and used. E.g. "SOFTWARE\\Microsoft\\VisualStudio\\$VERSION".
+/// There can be additional characters in the component; only the numeric
+/// characters are compared. This function only searches HKLM.
+static bool getSystemRegistryString(const char *keyPath, const char *valueName,
+ std::string &value, std::string *phValue) {
+#ifndef USE_WIN32
+ return false;
+#else
+ HKEY hRootKey = HKEY_LOCAL_MACHINE;
+ HKEY hKey = NULL;
+ long lResult;
+ bool returnValue = false;
+
+ const char *placeHolder = strstr(keyPath, "$VERSION");
+ std::string bestName;
+ // If we have a $VERSION placeholder, do the highest-version search.
+ if (placeHolder) {
+ const char *keyEnd = placeHolder - 1;
+ const char *nextKey = placeHolder;
+ // Find end of previous key.
+ while ((keyEnd > keyPath) && (*keyEnd != '\\'))
+ keyEnd--;
+ // Find end of key containing $VERSION.
+ while (*nextKey && (*nextKey != '\\'))
+ nextKey++;
+ size_t partialKeyLength = keyEnd - keyPath;
+ char partialKey[256];
+ if (partialKeyLength > sizeof(partialKey))
+ partialKeyLength = sizeof(partialKey);
+ strncpy(partialKey, keyPath, partialKeyLength);
+ partialKey[partialKeyLength] = '\0';
+ HKEY hTopKey = NULL;
+ lResult = RegOpenKeyEx(hRootKey, partialKey, 0, KEY_READ | KEY_WOW64_32KEY,
+ &hTopKey);
+ if (lResult == ERROR_SUCCESS) {
+ char keyName[256];
+ double bestValue = 0.0;
+ DWORD index, size = sizeof(keyName) - 1;
+ for (index = 0; RegEnumKeyEx(hTopKey, index, keyName, &size, NULL,
+ NULL, NULL, NULL) == ERROR_SUCCESS; index++) {
+ const char *sp = keyName;
+ while (*sp && !isDigit(*sp))
+ sp++;
+ if (!*sp)
+ continue;
+ const char *ep = sp + 1;
+ while (*ep && (isDigit(*ep) || (*ep == '.')))
+ ep++;
+ char numBuf[32];
+ strncpy(numBuf, sp, sizeof(numBuf) - 1);
+ numBuf[sizeof(numBuf) - 1] = '\0';
+ double dvalue = strtod(numBuf, NULL);
+ if (dvalue > bestValue) {
+ // Test that InstallDir is indeed there before keeping this index.
+ // Open the chosen key path remainder.
+ bestName = keyName;
+ // Append rest of key.
+ bestName.append(nextKey);
+ lResult = RegOpenKeyEx(hTopKey, bestName.c_str(), 0,
+ KEY_READ | KEY_WOW64_32KEY, &hKey);
+ if (lResult == ERROR_SUCCESS) {
+ lResult = readFullStringValue(hKey, valueName, value);
+ if (lResult == ERROR_SUCCESS) {
+ bestValue = dvalue;
+ if (phValue)
+ *phValue = bestName;
+ returnValue = true;
+ }
+ RegCloseKey(hKey);
+ }
+ }
+ size = sizeof(keyName) - 1;
+ }
+ RegCloseKey(hTopKey);
+ }
+ } else {
+ lResult =
+ RegOpenKeyEx(hRootKey, keyPath, 0, KEY_READ | KEY_WOW64_32KEY, &hKey);
+ if (lResult == ERROR_SUCCESS) {
+ lResult = readFullStringValue(hKey, valueName, value);
+ if (lResult == ERROR_SUCCESS)
+ returnValue = true;
+ if (phValue)
+ phValue->clear();
+ RegCloseKey(hKey);
+ }
+ }
+ return returnValue;
+#endif // USE_WIN32
+}
+
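
The $VERSION handling above reduces to: enumerate sibling key names, compare only the numeric part of each name, and keep the highest. A self-contained sketch of that selection, with made-up key names and no Windows registry calls:

#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

// Among sibling key names, compare only their numeric part and keep the
// highest one. The real code enumerates HKLM subkeys with RegEnumKeyEx.
static std::string pickHighestVersion(const std::vector<std::string> &Keys) {
  std::string Best;
  double BestValue = 0.0;
  for (const std::string &Key : Keys) {
    std::size_t I = Key.find_first_of("0123456789");
    if (I == std::string::npos)
      continue;
    double V = std::strtod(Key.c_str() + I, nullptr);
    if (V > BestValue) {
      BestValue = V;
      Best = Key;
    }
  }
  return Best;
}

int main() {
  std::cout << pickHighestVersion({"9.0", "10.0", "12.0", "SxS"}) << "\n"; // 12.0
}
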
+/// \brief Get Windows SDK installation directory.
+bool MSVCToolChain::getWindowsSDKDir(std::string &path, int &major,
+ int &minor) const {
+ std::string sdkVersion;
+ // Try the Windows registry.
+ bool hasSDKDir = getSystemRegistryString(
+ "SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
+ "InstallationFolder", path, &sdkVersion);
+ if (!sdkVersion.empty())
+ ::sscanf(sdkVersion.c_str(), "v%d.%d", &major, &minor);
+ return hasSDKDir && !path.empty();
+}
+
+// Gets the library path required to link against the Windows SDK.
+bool MSVCToolChain::getWindowsSDKLibraryPath(std::string &path) const {
+ std::string sdkPath;
+ int sdkMajor = 0;
+ int sdkMinor = 0;
+
+ path.clear();
+ if (!getWindowsSDKDir(sdkPath, sdkMajor, sdkMinor))
+ return false;
+
+ llvm::SmallString<128> libPath(sdkPath);
+ llvm::sys::path::append(libPath, "Lib");
+ if (sdkMajor <= 7) {
+ switch (getArch()) {
+ // In Windows SDK 7.x, x86 libraries are directly in the Lib folder.
+ case llvm::Triple::x86:
+ break;
+ case llvm::Triple::x86_64:
+ llvm::sys::path::append(libPath, "x64");
+ break;
+ case llvm::Triple::arm:
+ // It is not necessary to link against Windows SDK 7.x when targeting ARM.
+ return false;
+ default:
+ return false;
+ }
+ } else {
+ // Windows SDK 8.x installs libraries in a folder whose names depend on the
+ // version of the OS you're targeting. By default choose the newest, which
+ // usually corresponds to the version of the OS you've installed the SDK on.
+ const char *tests[] = {"winv6.3", "win8", "win7"};
+ bool found = false;
+ for (const char *test : tests) {
+ llvm::SmallString<128> testPath(libPath);
+ llvm::sys::path::append(testPath, test);
+ if (llvm::sys::fs::exists(testPath.c_str())) {
+ libPath = testPath;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return false;
+
+ llvm::sys::path::append(libPath, "um");
+ switch (getArch()) {
+ case llvm::Triple::x86:
+ llvm::sys::path::append(libPath, "x86");
+ break;
+ case llvm::Triple::x86_64:
+ llvm::sys::path::append(libPath, "x64");
+ break;
+ case llvm::Triple::arm:
+ llvm::sys::path::append(libPath, "arm");
+ break;
+ default:
+ return false;
+ }
+ }
+
+ path = libPath.str();
+ return true;
+}
+
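
As a rough illustration of the layout logic above (not the actual toolchain code), the chosen library subdirectory can be seen as a function of the SDK major version and the target architecture; the probed OS-version folder is fixed to "winv6.3" here and error handling is dropped.

#include <iostream>
#include <string>

// SDK 7.x keeps x86 libraries directly in Lib/ (x64 in Lib/x64), while
// SDK 8.x adds an OS-version folder and a per-architecture "um" subdirectory.
static std::string sdkLibSubdir(int SdkMajor, const std::string &Arch) {
  if (SdkMajor <= 7)
    return Arch == "x86_64" ? "Lib/x64" : "Lib";
  std::string Dir = "Lib/winv6.3/um/";
  if (Arch == "x86")
    Dir += "x86";
  else if (Arch == "x86_64")
    Dir += "x64";
  else
    Dir += "arm";
  return Dir;
}

int main() {
  std::cout << sdkLibSubdir(8, "x86_64") << "\n"; // Lib/winv6.3/um/x64
}
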
+// Get the location to use for Visual Studio binaries. The location priority
+// is: %VCINSTALLDIR% > %PATH% > newest copy of Visual Studio installed on
+// system (as reported by the registry).
+bool MSVCToolChain::getVisualStudioBinariesFolder(const char *clangProgramPath,
+ std::string &path) const {
+ path.clear();
+
+ SmallString<128> BinDir;
+
+ // First check the environment variables that vsvars32.bat sets.
+ llvm::Optional<std::string> VcInstallDir =
+ llvm::sys::Process::GetEnv("VCINSTALLDIR");
+ if (VcInstallDir.hasValue()) {
+ BinDir = VcInstallDir.getValue();
+ llvm::sys::path::append(BinDir, "bin");
+ } else {
+ // Next walk the PATH, trying to find a cl.exe in the path. If we find one,
+ // use that. However, make sure it's not clang's cl.exe.
+ llvm::Optional<std::string> OptPath = llvm::sys::Process::GetEnv("PATH");
+ if (OptPath.hasValue()) {
+ const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
+ SmallVector<StringRef, 8> PathSegments;
+ llvm::SplitString(OptPath.getValue(), PathSegments, EnvPathSeparatorStr);
+
+ for (StringRef PathSegment : PathSegments) {
+ if (PathSegment.empty())
+ continue;
+
+ SmallString<128> FilePath(PathSegment);
+ llvm::sys::path::append(FilePath, "cl.exe");
+ if (llvm::sys::fs::can_execute(FilePath.c_str()) &&
+ !llvm::sys::fs::equivalent(FilePath.c_str(), clangProgramPath)) {
+ // If we found it on the PATH, use it exactly as is with no
+ // modifications.
+ path = PathSegment;
+ return true;
+ }
+ }
+ }
+
+ std::string installDir;
+ // With no VCINSTALLDIR and nothing on the PATH, if we can't find it in the
+ // registry then we have no choice but to fail.
+ if (!getVisualStudioInstallDir(installDir))
+ return false;
+
+ // Regardless of what binary we're ultimately trying to find, we make sure
+ // that this is a Visual Studio directory by checking for cl.exe. We use
+ // cl.exe instead of other binaries like link.exe because programs such as
+ // GnuWin32 also have a utility called link.exe, so cl.exe is the least
+ // ambiguous.
+ BinDir = installDir;
+ llvm::sys::path::append(BinDir, "VC", "bin");
+ SmallString<128> ClPath(BinDir);
+ llvm::sys::path::append(ClPath, "cl.exe");
+
+ if (!llvm::sys::fs::can_execute(ClPath.c_str()))
+ return false;
+ }
+
+ if (BinDir.empty())
+ return false;
+
+ switch (getArch()) {
+ case llvm::Triple::x86:
+ break;
+ case llvm::Triple::x86_64:
+ llvm::sys::path::append(BinDir, "amd64");
+ break;
+ case llvm::Triple::arm:
+ llvm::sys::path::append(BinDir, "arm");
+ break;
+ default:
+ // Whatever this is, Visual Studio doesn't have a toolchain for it.
+ return false;
+ }
+ path = BinDir.str();
+ return true;
+}
+
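
The middle step of the priority order above, scanning %PATH% for a cl.exe that is not clang's own, can be sketched with standard C++17 <filesystem>; the VCINSTALLDIR and registry steps are left out, and the ';' separator is assumed.

#include <cstdlib>
#include <filesystem>
#include <sstream>
#include <string>

namespace fs = std::filesystem;

// Split %PATH% and return the first directory containing a cl.exe that is not
// the compiler's own cl.exe shim. Error handling is minimal.
static std::string findClOnPath(const std::string &SelfClPath) {
  const char *Path = std::getenv("PATH");
  if (!Path)
    return "";
  std::stringstream SS(Path);
  std::string Dir;
  while (std::getline(SS, Dir, ';')) {
    if (Dir.empty())
      continue;
    fs::path Candidate = fs::path(Dir) / "cl.exe";
    std::error_code EC;
    if (fs::exists(Candidate, EC) &&
        !fs::equivalent(Candidate, fs::path(SelfClPath), EC))
      return Dir;
  }
  return "";
}

int main() { return findClOnPath("C:/clang/bin/cl.exe").empty() ? 1 : 0; }
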
+// Get Visual Studio installation directory.
+bool MSVCToolChain::getVisualStudioInstallDir(std::string &path) const {
+ // First check the environment variables that vsvars32.bat sets.
+ const char *vcinstalldir = getenv("VCINSTALLDIR");
+ if (vcinstalldir) {
+ path = vcinstalldir;
+ path = path.substr(0, path.find("\\VC"));
+ return true;
+ }
+
+ std::string vsIDEInstallDir;
+ std::string vsExpressIDEInstallDir;
+ // Then try the windows registry.
+ bool hasVCDir =
+ getSystemRegistryString("SOFTWARE\\Microsoft\\VisualStudio\\$VERSION",
+ "InstallDir", vsIDEInstallDir, nullptr);
+ if (hasVCDir && !vsIDEInstallDir.empty()) {
+ path = vsIDEInstallDir.substr(0, vsIDEInstallDir.find("\\Common7\\IDE"));
+ return true;
+ }
+
+ bool hasVCExpressDir =
+ getSystemRegistryString("SOFTWARE\\Microsoft\\VCExpress\\$VERSION",
+ "InstallDir", vsExpressIDEInstallDir, nullptr);
+ if (hasVCExpressDir && !vsExpressIDEInstallDir.empty()) {
+ path = vsExpressIDEInstallDir.substr(
+ 0, vsIDEInstallDir.find("\\Common7\\IDE"));
+ return true;
+ }
+
+ // Try the environment.
+ const char *vs120comntools = getenv("VS120COMNTOOLS");
+ const char *vs100comntools = getenv("VS100COMNTOOLS");
+ const char *vs90comntools = getenv("VS90COMNTOOLS");
+ const char *vs80comntools = getenv("VS80COMNTOOLS");
+
+ const char *vscomntools = nullptr;
+
+ // Find any version we can
+ if (vs120comntools)
+ vscomntools = vs120comntools;
+ else if (vs100comntools)
+ vscomntools = vs100comntools;
+ else if (vs90comntools)
+ vscomntools = vs90comntools;
+ else if (vs80comntools)
+ vscomntools = vs80comntools;
+
+ if (vscomntools && *vscomntools) {
+ const char *p = strstr(vscomntools, "\\Common7\\Tools");
+ path = p ? std::string(vscomntools, p) : vscomntools;
+ return true;
+ }
+ return false;
+}
+
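
The final environment fallback above picks the newest VS*COMNTOOLS variable that is set and strips the "\Common7\Tools" suffix to recover the install root; a standalone sketch:

#include <cstdlib>
#include <string>

// Walk the known variables from newest to oldest and trim the common-tools
// suffix off the first one that is set.
static std::string vsInstallDirFromComnTools() {
  for (const char *Var : {"VS120COMNTOOLS", "VS100COMNTOOLS",
                          "VS90COMNTOOLS", "VS80COMNTOOLS"}) {
    const char *Value = std::getenv(Var);
    if (!Value || !*Value)
      continue;
    std::string Path(Value);
    std::size_t Pos = Path.find("\\Common7\\Tools");
    return Pos == std::string::npos ? Path : Path.substr(0, Pos);
  }
  return "";
}

int main() { return vsInstallDirFromComnTools().empty() ? 1 : 0; }
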
+void MSVCToolChain::AddSystemIncludeWithSubfolder(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const std::string &folder,
+ const char *subfolder) const {
+ llvm::SmallString<128> path(folder);
+ llvm::sys::path::append(path, subfolder);
+ addSystemInclude(DriverArgs, CC1Args, path.str());
+}
+
+void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(getDriver().ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Honor %INCLUDE%, which should contain the essential search paths set up by vcvarsall.bat.
+ if (const char *cl_include_dir = getenv("INCLUDE")) {
+ SmallVector<StringRef, 8> Dirs;
+ StringRef(cl_include_dir)
+ .split(Dirs, ";", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+ for (StringRef Dir : Dirs)
+ addSystemInclude(DriverArgs, CC1Args, Dir);
+ if (!Dirs.empty())
+ return;
+ }
+
+ std::string VSDir;
+
+ // When built with access to the proper Windows APIs, try to actually find
+ // the correct include paths first.
+ if (getVisualStudioInstallDir(VSDir)) {
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, VSDir, "VC\\include");
+
+ std::string WindowsSDKDir;
+ int major, minor;
+ if (getWindowsSDKDir(WindowsSDKDir, major, minor)) {
+ if (major >= 8) {
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
+ "include\\shared");
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
+ "include\\um");
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
+ "include\\winrt");
+ } else {
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
+ "include");
+ }
+ } else {
+ addSystemInclude(DriverArgs, CC1Args, VSDir);
+ }
+ return;
+ }
+
+ // As a fallback, select default install paths.
+ // FIXME: Don't guess drives and paths like this on Windows.
+ const StringRef Paths[] = {
+ "C:/Program Files/Microsoft Visual Studio 10.0/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 9.0/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 9.0/VC/PlatformSDK/Include",
+ "C:/Program Files/Microsoft Visual Studio 8/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 8/VC/PlatformSDK/Include"
+ };
+ addSystemIncludes(DriverArgs, CC1Args, Paths);
+}
+
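
The %INCLUDE% handling above splits the variable on ';' and drops empty entries before falling back to detected Visual Studio and SDK paths. A minimal sketch of just the splitting step:

#include <cstdlib>
#include <sstream>
#include <string>
#include <vector>

// Split %INCLUDE% on ';', skip empty pieces, and treat each remaining piece
// as a system include directory.
static std::vector<std::string> systemIncludesFromEnv() {
  std::vector<std::string> Dirs;
  if (const char *Include = std::getenv("INCLUDE")) {
    std::stringstream SS(Include);
    std::string Dir;
    while (std::getline(SS, Dir, ';'))
      if (!Dir.empty())
        Dirs.push_back(Dir);
  }
  return Dirs;
}

int main() { return systemIncludesFromEnv().empty() ? 1 : 0; }
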
+void MSVCToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // FIXME: There should probably be logic here to find libc++ on Windows.
+}
diff --git a/lib/Driver/Multilib.cpp b/lib/Driver/Multilib.cpp
index 484ce1627d7b..1f5d62f247c4 100644
--- a/lib/Driver/Multilib.cpp
+++ b/lib/Driver/Multilib.cpp
@@ -37,7 +37,7 @@ static void normalizePathSegment(std::string &Segment) {
// Prune trailing "/" or "./"
while (1) {
- StringRef last = *--path::end(seg);
+ StringRef last = path::filename(seg);
if (last != ".")
break;
seg = path::parent_path(seg);
diff --git a/lib/Driver/Phases.cpp b/lib/Driver/Phases.cpp
index 155e53b64fc1..7ae270857f4d 100644
--- a/lib/Driver/Phases.cpp
+++ b/lib/Driver/Phases.cpp
@@ -18,6 +18,7 @@ const char *phases::getPhaseName(ID Id) {
case Preprocess: return "preprocessor";
case Precompile: return "precompiler";
case Compile: return "compiler";
+ case Backend: return "backend";
case Assemble: return "assembler";
case Link: return "linker";
}
diff --git a/lib/Driver/SanitizerArgs.cpp b/lib/Driver/SanitizerArgs.cpp
index b64f0275768c..bd7bc218e3bf 100644
--- a/lib/Driver/SanitizerArgs.cpp
+++ b/lib/Driver/SanitizerArgs.cpp
@@ -21,93 +21,263 @@
using namespace clang::driver;
using namespace llvm::opt;
+namespace {
+/// Assign ordinals to possible values of -fsanitize= flag.
+/// We use the ordinal values as bit positions within \c SanitizeKind.
+enum SanitizeOrdinal {
+#define SANITIZER(NAME, ID) SO_##ID,
+#define SANITIZER_GROUP(NAME, ID, ALIAS) SO_##ID##Group,
+#include "clang/Basic/Sanitizers.def"
+ SO_Count
+};
+
+/// Represents a set of sanitizer kinds. It is also used to define:
+/// 1) set of sanitizers each sanitizer group expands into.
+/// 2) set of sanitizers sharing a specific property (e.g.
+/// all sanitizers with zero-base shadow).
+enum SanitizeKind {
+#define SANITIZER(NAME, ID) ID = 1 << SO_##ID,
+#define SANITIZER_GROUP(NAME, ID, ALIAS) \
+ID = ALIAS, ID##Group = 1 << SO_##ID##Group,
+#include "clang/Basic/Sanitizers.def"
+ NeedsUbsanRt = Undefined | Integer,
+ NotAllowedWithTrap = Vptr,
+ RequiresPIE = Memory | DataFlow,
+ NeedsUnwindTables = Address | Thread | Memory | DataFlow,
+ SupportsCoverage = Address | Memory | Leak | Undefined | Integer,
+ RecoverableByDefault = Undefined | Integer,
+ Unrecoverable = Address | Unreachable | Return,
+ LegacyFsanitizeRecoverMask = Undefined | Integer
+};
+}
+
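
The ordinal/bit-position scheme above can be shown with a tiny hand-written table standing in for clang/Basic/Sanitizers.def; the sanitizer names and the single "undefined" group here are illustrative.

#include <cassert>

// Ordinals number the sanitizers; kinds are single bits at those positions.
enum SanitizeOrdinal { SO_Address, SO_Alignment, SO_Vptr, SO_UndefinedGroup, SO_Count };

enum SanitizeKind : unsigned {
  Address        = 1u << SO_Address,
  Alignment      = 1u << SO_Alignment,
  Vptr           = 1u << SO_Vptr,
  Undefined      = Alignment | Vptr,         // alias: what the group expands to
  UndefinedGroup = 1u << SO_UndefinedGroup,  // bit kept while "undefined" is still a group
};

// Mirror of expandGroups(): replace each group bit with its member bits.
static unsigned expandGroups(unsigned Kinds) {
  if (Kinds & UndefinedGroup)
    Kinds |= Undefined;
  return Kinds;
}

int main() {
  unsigned Kinds = Address | UndefinedGroup;  // e.g. -fsanitize=address,undefined
  Kinds = expandGroups(Kinds);
  assert(Kinds & Alignment);
  assert(Kinds & Vptr);
  return 0;
}
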
+/// Returns true if set of \p Sanitizers contain at least one sanitizer from
+/// \p Kinds.
+static bool hasOneOf(const clang::SanitizerSet &Sanitizers, unsigned Kinds) {
+#define SANITIZER(NAME, ID) \
+ if (Sanitizers.has(clang::SanitizerKind::ID) && (Kinds & ID)) \
+ return true;
+#include "clang/Basic/Sanitizers.def"
+ return false;
+}
+
+/// Adds all sanitizers from \p Kinds to \p Sanitizers.
+static void addAllOf(clang::SanitizerSet &Sanitizers, unsigned Kinds) {
+#define SANITIZER(NAME, ID) \
+ if (Kinds & ID) \
+ Sanitizers.set(clang::SanitizerKind::ID, true);
+#include "clang/Basic/Sanitizers.def"
+}
+
+static unsigned toSanitizeKind(clang::SanitizerKind K) {
+#define SANITIZER(NAME, ID) \
+ if (K == clang::SanitizerKind::ID) \
+ return ID;
+#include "clang/Basic/Sanitizers.def"
+ llvm_unreachable("Invalid SanitizerKind!");
+}
+
+/// Parse a single value from a -fsanitize= or -fno-sanitize= value list.
+/// Returns a member of the \c SanitizeKind enumeration, or \c 0
+/// if \p Value is not known.
+static unsigned parseValue(const char *Value);
+
+/// Parse a -fsanitize= or -fno-sanitize= argument's values, diagnosing any
+/// invalid components. Returns OR of members of \c SanitizeKind enumeration.
+static unsigned parseArgValues(const Driver &D, const llvm::opt::Arg *A,
+ bool DiagnoseErrors);
+
+/// Produce an argument string from ArgList \p Args, which shows how it
+/// provides some sanitizer kind from \p Mask. For example, the argument list
+/// "-fsanitize=thread,vptr -fsanitize=address" with mask \c NeedsUbsanRt
+/// would produce "-fsanitize=vptr".
+static std::string lastArgumentForMask(const Driver &D,
+ const llvm::opt::ArgList &Args,
+ unsigned Mask);
+
+static std::string lastArgumentForKind(const Driver &D,
+ const llvm::opt::ArgList &Args,
+ clang::SanitizerKind K) {
+ return lastArgumentForMask(D, Args, toSanitizeKind(K));
+}
+
+/// Produce an argument string from argument \p A, which shows how it provides
+/// a value in \p Mask. For instance, the argument
+/// "-fsanitize=address,alignment" with mask \c NeedsUbsanRt would produce
+/// "-fsanitize=alignment".
+static std::string describeSanitizeArg(const llvm::opt::Arg *A, unsigned Mask);
+
+/// Produce a string containing comma-separated names of sanitizers in \p
+/// Sanitizers set.
+static std::string toString(const clang::SanitizerSet &Sanitizers);
+
+/// For each sanitizer group bit set in \p Kinds, set the bits for sanitizers
+/// this group enables.
+static unsigned expandGroups(unsigned Kinds);
+
+static unsigned getToolchainUnsupportedKinds(const ToolChain &TC) {
+ bool IsFreeBSD = TC.getTriple().getOS() == llvm::Triple::FreeBSD;
+ bool IsLinux = TC.getTriple().getOS() == llvm::Triple::Linux;
+ bool IsX86 = TC.getTriple().getArch() == llvm::Triple::x86;
+ bool IsX86_64 = TC.getTriple().getArch() == llvm::Triple::x86_64;
+
+ unsigned Unsupported = 0;
+ if (!(IsLinux && IsX86_64)) {
+ Unsupported |= Memory | DataFlow;
+ }
+ if (!((IsLinux || IsFreeBSD) && IsX86_64)) {
+ Unsupported |= Thread;
+ }
+ if (!(IsLinux && (IsX86 || IsX86_64))) {
+ Unsupported |= Function;
+ }
+ return Unsupported;
+}
+
+bool SanitizerArgs::needsUbsanRt() const {
+ return !UbsanTrapOnError && hasOneOf(Sanitizers, NeedsUbsanRt);
+}
+
+bool SanitizerArgs::requiresPIE() const {
+ return AsanZeroBaseShadow || hasOneOf(Sanitizers, RequiresPIE);
+}
+
+bool SanitizerArgs::needsUnwindTables() const {
+ return hasOneOf(Sanitizers, NeedsUnwindTables);
+}
+
void SanitizerArgs::clear() {
- Kind = 0;
+ Sanitizers.clear();
+ RecoverableSanitizers.clear();
BlacklistFile = "";
+ SanitizeCoverage = 0;
MsanTrackOrigins = 0;
+ AsanFieldPadding = 0;
AsanZeroBaseShadow = false;
UbsanTrapOnError = false;
AsanSharedRuntime = false;
-}
-
-SanitizerArgs::SanitizerArgs() {
- clear();
+ LinkCXXRuntimes = false;
}
SanitizerArgs::SanitizerArgs(const ToolChain &TC,
const llvm::opt::ArgList &Args) {
clear();
- unsigned AllAdd = 0; // All kinds of sanitizers that were turned on
- // at least once (possibly, disabled further).
unsigned AllRemove = 0; // During the loop below, the accumulated set of
// sanitizers disabled by the current sanitizer
// argument or any argument after it.
unsigned DiagnosedKinds = 0; // All Kinds we have diagnosed up to now.
// Used to deduplicate diagnostics.
+ unsigned Kinds = 0;
+ unsigned NotSupported = getToolchainUnsupportedKinds(TC);
const Driver &D = TC.getDriver();
for (ArgList::const_reverse_iterator I = Args.rbegin(), E = Args.rend();
I != E; ++I) {
- unsigned Add, Remove;
- if (!parse(D, Args, *I, Add, Remove, true))
- continue;
- (*I)->claim();
-
- AllAdd |= expandGroups(Add);
- AllRemove |= expandGroups(Remove);
-
- // Avoid diagnosing any sanitizer which is disabled later.
- Add &= ~AllRemove;
- // At this point we have not expanded groups, so any unsupported sanitizers
- // in Add are those which have been explicitly enabled. Diagnose them.
- Add = filterUnsupportedKinds(TC, Add, Args, *I, /*DiagnoseErrors=*/true,
- DiagnosedKinds);
- Add = expandGroups(Add);
- // Group expansion may have enabled a sanitizer which is disabled later.
- Add &= ~AllRemove;
- // Silently discard any unsupported sanitizers implicitly enabled through
- // group expansion.
- Add = filterUnsupportedKinds(TC, Add, Args, *I, /*DiagnoseErrors=*/false,
- DiagnosedKinds);
-
- Kind |= Add;
+ const auto *Arg = *I;
+ if (Arg->getOption().matches(options::OPT_fsanitize_EQ)) {
+ Arg->claim();
+ unsigned Add = parseArgValues(D, Arg, true);
+
+ // Avoid diagnosing any sanitizer which is disabled later.
+ Add &= ~AllRemove;
+ // At this point we have not expanded groups, so any unsupported
+ // sanitizers in Add are those which have been explicitly enabled.
+ // Diagnose them.
+ if (unsigned KindsToDiagnose = Add & NotSupported & ~DiagnosedKinds) {
+ // Only diagnose the new kinds.
+ std::string Desc = describeSanitizeArg(*I, KindsToDiagnose);
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Desc << TC.getTriple().str();
+ DiagnosedKinds |= KindsToDiagnose;
+ }
+ Add &= ~NotSupported;
+
+ Add = expandGroups(Add);
+ // Group expansion may have enabled a sanitizer which is disabled later.
+ Add &= ~AllRemove;
+ // Silently discard any unsupported sanitizers implicitly enabled through
+ // group expansion.
+ Add &= ~NotSupported;
+
+ Kinds |= Add;
+ } else if (Arg->getOption().matches(options::OPT_fno_sanitize_EQ)) {
+ Arg->claim();
+ unsigned Remove = parseArgValues(D, Arg, true);
+ AllRemove |= expandGroups(Remove);
+ }
+ }
+ addAllOf(Sanitizers, Kinds);
+
+ // Parse -f(no-)?sanitize-recover flags.
+ unsigned RecoverableKinds = RecoverableByDefault;
+ unsigned DiagnosedUnrecoverableKinds = 0;
+ for (const auto *Arg : Args) {
+ if (Arg->getOption().matches(options::OPT_fsanitize_recover)) {
+ // FIXME: Add deprecation notice, and then remove this flag.
+ RecoverableKinds |= expandGroups(LegacyFsanitizeRecoverMask);
+ Arg->claim();
+ } else if (Arg->getOption().matches(options::OPT_fno_sanitize_recover)) {
+ // FIXME: Add deprecation notice, and then remove this flag.
+ RecoverableKinds &= ~expandGroups(LegacyFsanitizeRecoverMask);
+ Arg->claim();
+ } else if (Arg->getOption().matches(options::OPT_fsanitize_recover_EQ)) {
+ unsigned Add = parseArgValues(D, Arg, true);
+ // Report error if user explicitly tries to recover from unrecoverable
+ // sanitizer.
+ if (unsigned KindsToDiagnose =
+ Add & Unrecoverable & ~DiagnosedUnrecoverableKinds) {
+ SanitizerSet SetToDiagnose;
+ addAllOf(SetToDiagnose, KindsToDiagnose);
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << Arg->getOption().getName() << toString(SetToDiagnose);
+ DiagnosedUnrecoverableKinds |= KindsToDiagnose;
+ }
+ RecoverableKinds |= expandGroups(Add);
+ Arg->claim();
+ } else if (Arg->getOption().matches(options::OPT_fno_sanitize_recover_EQ)) {
+ RecoverableKinds &= ~expandGroups(parseArgValues(D, Arg, true));
+ Arg->claim();
+ }
}
+ RecoverableKinds &= Kinds;
+ RecoverableKinds &= ~Unrecoverable;
+ addAllOf(RecoverableSanitizers, RecoverableKinds);
UbsanTrapOnError =
Args.hasFlag(options::OPT_fsanitize_undefined_trap_on_error,
options::OPT_fno_sanitize_undefined_trap_on_error, false);
// Warn about undefined sanitizer options that require runtime support.
- if (UbsanTrapOnError && notAllowedWithTrap()) {
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << lastArgumentForKind(D, Args, NotAllowedWithTrap)
+ if (UbsanTrapOnError && hasOneOf(Sanitizers, NotAllowedWithTrap)) {
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << lastArgumentForMask(D, Args, NotAllowedWithTrap)
<< "-fsanitize-undefined-trap-on-error";
}
- // Only one runtime library can be used at once.
- bool NeedsAsan = needsAsanRt();
- bool NeedsTsan = needsTsanRt();
- bool NeedsMsan = needsMsanRt();
- bool NeedsLsan = needsLeakDetection();
+ // Check for incompatible sanitizers.
+ bool NeedsAsan = Sanitizers.has(SanitizerKind::Address);
+ bool NeedsTsan = Sanitizers.has(SanitizerKind::Thread);
+ bool NeedsMsan = Sanitizers.has(SanitizerKind::Memory);
+ bool NeedsLsan = Sanitizers.has(SanitizerKind::Leak);
if (NeedsAsan && NeedsTsan)
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << lastArgumentForKind(D, Args, NeedsAsanRt)
- << lastArgumentForKind(D, Args, NeedsTsanRt);
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << lastArgumentForKind(D, Args, SanitizerKind::Address)
+ << lastArgumentForKind(D, Args, SanitizerKind::Thread);
if (NeedsAsan && NeedsMsan)
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << lastArgumentForKind(D, Args, NeedsAsanRt)
- << lastArgumentForKind(D, Args, NeedsMsanRt);
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << lastArgumentForKind(D, Args, SanitizerKind::Address)
+ << lastArgumentForKind(D, Args, SanitizerKind::Memory);
if (NeedsTsan && NeedsMsan)
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << lastArgumentForKind(D, Args, NeedsTsanRt)
- << lastArgumentForKind(D, Args, NeedsMsanRt);
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << lastArgumentForKind(D, Args, SanitizerKind::Thread)
+ << lastArgumentForKind(D, Args, SanitizerKind::Memory);
if (NeedsLsan && NeedsTsan)
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << lastArgumentForKind(D, Args, NeedsLeakDetection)
- << lastArgumentForKind(D, Args, NeedsTsanRt);
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << lastArgumentForKind(D, Args, SanitizerKind::Leak)
+ << lastArgumentForKind(D, Args, SanitizerKind::Thread);
if (NeedsLsan && NeedsMsan)
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << lastArgumentForKind(D, Args, NeedsLeakDetection)
- << lastArgumentForKind(D, Args, NeedsMsanRt);
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << lastArgumentForKind(D, Args, SanitizerKind::Leak)
+ << lastArgumentForKind(D, Args, SanitizerKind::Memory);
// FIXME: Currently -fsanitize=leak is silently ignored in the presence of
// -fsanitize=address. Perhaps it should print an error, or perhaps
// -f(-no)sanitize=leak should change whether leak detection is enabled by
@@ -124,19 +294,18 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
std::unique_ptr<llvm::SpecialCaseList> SCL(
llvm::SpecialCaseList::create(BLPath, BLError));
if (!SCL.get())
- D.Diag(diag::err_drv_malformed_sanitizer_blacklist) << BLError;
+ D.Diag(clang::diag::err_drv_malformed_sanitizer_blacklist) << BLError;
else
BlacklistFile = BLPath;
} else {
- D.Diag(diag::err_drv_no_such_file) << BLPath;
+ D.Diag(clang::diag::err_drv_no_such_file) << BLPath;
}
}
} else {
// If no -fsanitize-blacklist option is specified, try to look up for
// blacklist in the resource directory.
std::string BLPath;
- if (getDefaultBlacklistForKind(D, Kind, BLPath) &&
- llvm::sys::fs::exists(BLPath))
+ if (getDefaultBlacklist(D, BLPath) && llvm::sys::fs::exists(BLPath))
BlacklistFile = BLPath;
}
@@ -155,32 +324,85 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
StringRef S = A->getValue();
if (S.getAsInteger(0, MsanTrackOrigins) || MsanTrackOrigins < 0 ||
MsanTrackOrigins > 2) {
- D.Diag(diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
}
}
}
}
+ // Parse -fsanitize-coverage=N. Only honored when at least one sanitizer from the SupportsCoverage set above is enabled.
+ if (hasOneOf(Sanitizers, SupportsCoverage)) {
+ if (Arg *A = Args.getLastArg(options::OPT_fsanitize_coverage)) {
+ StringRef S = A->getValue();
+ // Legal values are 0..4.
+ if (S.getAsInteger(0, SanitizeCoverage) || SanitizeCoverage < 0 ||
+ SanitizeCoverage > 4)
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ }
+ }
+
if (NeedsAsan) {
AsanSharedRuntime =
Args.hasArg(options::OPT_shared_libasan) ||
(TC.getTriple().getEnvironment() == llvm::Triple::Android);
AsanZeroBaseShadow =
(TC.getTriple().getEnvironment() == llvm::Triple::Android);
+ if (Arg *A =
+ Args.getLastArg(options::OPT_fsanitize_address_field_padding)) {
+ StringRef S = A->getValue();
+ // Legal values are 0, 1, and 2, but more levels may be added in the future.
+ if (S.getAsInteger(0, AsanFieldPadding) || AsanFieldPadding < 0 ||
+ AsanFieldPadding > 2) {
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ }
+ }
+
+ if (Arg *WindowsDebugRTArg =
+ Args.getLastArg(options::OPT__SLASH_MTd, options::OPT__SLASH_MT,
+ options::OPT__SLASH_MDd, options::OPT__SLASH_MD,
+ options::OPT__SLASH_LDd, options::OPT__SLASH_LD)) {
+ switch (WindowsDebugRTArg->getOption().getID()) {
+ case options::OPT__SLASH_MTd:
+ case options::OPT__SLASH_MDd:
+ case options::OPT__SLASH_LDd:
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << WindowsDebugRTArg->getAsString(Args)
+ << lastArgumentForKind(D, Args, SanitizerKind::Address);
+ D.Diag(clang::diag::note_drv_address_sanitizer_debug_runtime);
+ }
+ }
+ }
+
+ // Parse -link-cxx-sanitizer flag.
+ LinkCXXRuntimes =
+ Args.hasArg(options::OPT_fsanitize_link_cxx_runtime) || D.CCCIsCXX();
+}
+
+static std::string toString(const clang::SanitizerSet &Sanitizers) {
+ std::string Res;
+#define SANITIZER(NAME, ID) \
+ if (Sanitizers.has(clang::SanitizerKind::ID)) { \
+ if (!Res.empty()) \
+ Res += ","; \
+ Res += NAME; \
}
+#include "clang/Basic/Sanitizers.def"
+ return Res;
}
void SanitizerArgs::addArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
- if (!Kind)
+ if (Sanitizers.empty())
return;
- SmallString<256> SanitizeOpt("-fsanitize=");
-#define SANITIZER(NAME, ID) \
- if (Kind & ID) \
- SanitizeOpt += NAME ",";
-#include "clang/Basic/Sanitizers.def"
- SanitizeOpt.pop_back();
- CmdArgs.push_back(Args.MakeArgString(SanitizeOpt));
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize=" + toString(Sanitizers)));
+
+ if (!RecoverableSanitizers.empty())
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize-recover=" +
+ toString(RecoverableSanitizers)));
+
+ if (UbsanTrapOnError)
+ CmdArgs.push_back("-fsanitize-undefined-trap-on-error");
+
if (!BlacklistFile.empty()) {
SmallString<64> BlacklistOpt("-fsanitize-blacklist=");
BlacklistOpt += BlacklistFile;
@@ -190,13 +412,38 @@ void SanitizerArgs::addArgs(const llvm::opt::ArgList &Args,
if (MsanTrackOrigins)
CmdArgs.push_back(Args.MakeArgString("-fsanitize-memory-track-origins=" +
llvm::utostr(MsanTrackOrigins)));
-
+ if (AsanFieldPadding)
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize-address-field-padding=" +
+ llvm::utostr(AsanFieldPadding)));
+ if (SanitizeCoverage)
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize-coverage=" +
+ llvm::utostr(SanitizeCoverage)));
// Workaround for PR16386.
- if (needsMsanRt())
+ if (Sanitizers.has(SanitizerKind::Memory))
CmdArgs.push_back(Args.MakeArgString("-fno-assume-sane-operator-new"));
}
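
The forwarding step above, where toString() joins the enabled sanitizer names and addArgs() emits the corresponding flags, amounts to a comma-join; a simplified sketch with the set represented as a vector of names:

#include <iostream>
#include <string>
#include <vector>

// Join names with commas, as toString() does for a SanitizerSet.
static std::string join(const std::vector<std::string> &Names) {
  std::string Res;
  for (const std::string &N : Names) {
    if (!Res.empty())
      Res += ",";
    Res += N;
  }
  return Res;
}

int main() {
  std::vector<std::string> Sanitizers = {"address", "alignment"};
  std::vector<std::string> Recoverable = {"alignment"};
  std::vector<std::string> CmdArgs;
  if (!Sanitizers.empty())
    CmdArgs.push_back("-fsanitize=" + join(Sanitizers));
  if (!Recoverable.empty())
    CmdArgs.push_back("-fsanitize-recover=" + join(Recoverable));
  for (const std::string &A : CmdArgs)
    std::cout << A << "\n";
}
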
-unsigned SanitizerArgs::parse(const char *Value) {
+bool SanitizerArgs::getDefaultBlacklist(const Driver &D, std::string &BLPath) {
+ const char *BlacklistFile = nullptr;
+ if (Sanitizers.has(SanitizerKind::Address))
+ BlacklistFile = "asan_blacklist.txt";
+ else if (Sanitizers.has(SanitizerKind::Memory))
+ BlacklistFile = "msan_blacklist.txt";
+ else if (Sanitizers.has(SanitizerKind::Thread))
+ BlacklistFile = "tsan_blacklist.txt";
+ else if (Sanitizers.has(SanitizerKind::DataFlow))
+ BlacklistFile = "dfsan_abilist.txt";
+
+ if (BlacklistFile) {
+ SmallString<64> Path(D.ResourceDir);
+ llvm::sys::path::append(Path, BlacklistFile);
+ BLPath = Path.str();
+ return true;
+ }
+ return false;
+}
+
+unsigned parseValue(const char *Value) {
unsigned ParsedKind = llvm::StringSwitch<SanitizeKind>(Value)
#define SANITIZER(NAME, ID) .Case(NAME, ID)
#define SANITIZER_GROUP(NAME, ID, ALIAS) .Case(NAME, ID##Group)
@@ -205,107 +452,65 @@ unsigned SanitizerArgs::parse(const char *Value) {
return ParsedKind;
}
-unsigned SanitizerArgs::expandGroups(unsigned Kinds) {
+unsigned expandGroups(unsigned Kinds) {
#define SANITIZER(NAME, ID)
#define SANITIZER_GROUP(NAME, ID, ALIAS) if (Kinds & ID##Group) Kinds |= ID;
#include "clang/Basic/Sanitizers.def"
return Kinds;
}
-void SanitizerArgs::filterUnsupportedMask(const ToolChain &TC, unsigned &Kinds,
- unsigned Mask,
- const llvm::opt::ArgList &Args,
- const llvm::opt::Arg *A,
- bool DiagnoseErrors,
- unsigned &DiagnosedKinds) {
- unsigned MaskedKinds = Kinds & Mask;
- if (!MaskedKinds)
- return;
- Kinds &= ~Mask;
- // Do we have new kinds to diagnose?
- if (DiagnoseErrors && (DiagnosedKinds & MaskedKinds) != MaskedKinds) {
- // Only diagnose the new kinds.
- std::string Desc =
- describeSanitizeArg(Args, A, MaskedKinds & ~DiagnosedKinds);
- TC.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
- << Desc << TC.getTriple().str();
- DiagnosedKinds |= MaskedKinds;
- }
-}
-
-unsigned SanitizerArgs::filterUnsupportedKinds(const ToolChain &TC,
- unsigned Kinds,
- const llvm::opt::ArgList &Args,
- const llvm::opt::Arg *A,
- bool DiagnoseErrors,
- unsigned &DiagnosedKinds) {
- bool IsLinux = TC.getTriple().getOS() == llvm::Triple::Linux;
- bool IsX86 = TC.getTriple().getArch() == llvm::Triple::x86;
- bool IsX86_64 = TC.getTriple().getArch() == llvm::Triple::x86_64;
- if (!(IsLinux && IsX86_64)) {
- filterUnsupportedMask(TC, Kinds, Thread | Memory | DataFlow, Args, A,
- DiagnoseErrors, DiagnosedKinds);
- }
- if (!(IsLinux && (IsX86 || IsX86_64))) {
- filterUnsupportedMask(TC, Kinds, Function, Args, A, DiagnoseErrors,
- DiagnosedKinds);
- }
- return Kinds;
-}
-
-unsigned SanitizerArgs::parse(const Driver &D, const llvm::opt::Arg *A,
- bool DiagnoseErrors) {
- unsigned Kind = 0;
+unsigned parseArgValues(const Driver &D, const llvm::opt::Arg *A,
+ bool DiagnoseErrors) {
+ assert((A->getOption().matches(options::OPT_fsanitize_EQ) ||
+ A->getOption().matches(options::OPT_fno_sanitize_EQ) ||
+ A->getOption().matches(options::OPT_fsanitize_recover_EQ) ||
+ A->getOption().matches(options::OPT_fno_sanitize_recover_EQ)) &&
+ "Invalid argument in parseArgValues!");
+ unsigned Kinds = 0;
for (unsigned I = 0, N = A->getNumValues(); I != N; ++I) {
- if (unsigned K = parse(A->getValue(I)))
- Kind |= K;
- else if (DiagnoseErrors)
- D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << A->getValue(I);
- }
- return Kind;
-}
+ const char *Value = A->getValue(I);
+ unsigned Kind;
+ // Special case: don't accept -fsanitize=all.
+ if (A->getOption().matches(options::OPT_fsanitize_EQ) &&
+ 0 == strcmp("all", Value))
+ Kind = 0;
+ else
+ Kind = parseValue(Value);
-bool SanitizerArgs::parse(const Driver &D, const llvm::opt::ArgList &Args,
- const llvm::opt::Arg *A, unsigned &Add,
- unsigned &Remove, bool DiagnoseErrors) {
- Add = 0;
- Remove = 0;
- if (A->getOption().matches(options::OPT_fsanitize_EQ)) {
- Add = parse(D, A, DiagnoseErrors);
- } else if (A->getOption().matches(options::OPT_fno_sanitize_EQ)) {
- Remove = parse(D, A, DiagnoseErrors);
- } else {
- // Flag is not relevant to sanitizers.
- return false;
+ if (Kind)
+ Kinds |= Kind;
+ else if (DiagnoseErrors)
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Value;
}
- return true;
+ return Kinds;
}
-std::string SanitizerArgs::lastArgumentForKind(const Driver &D,
- const llvm::opt::ArgList &Args,
- unsigned Kind) {
+std::string lastArgumentForMask(const Driver &D, const llvm::opt::ArgList &Args,
+ unsigned Mask) {
for (llvm::opt::ArgList::const_reverse_iterator I = Args.rbegin(),
E = Args.rend();
I != E; ++I) {
- unsigned Add, Remove;
- if (parse(D, Args, *I, Add, Remove, false) &&
- (expandGroups(Add) & Kind))
- return describeSanitizeArg(Args, *I, Kind);
- Kind &= ~Remove;
+ const auto *Arg = *I;
+ if (Arg->getOption().matches(options::OPT_fsanitize_EQ)) {
+ unsigned AddKinds = expandGroups(parseArgValues(D, Arg, false));
+ if (AddKinds & Mask)
+ return describeSanitizeArg(Arg, Mask);
+ } else if (Arg->getOption().matches(options::OPT_fno_sanitize_EQ)) {
+ unsigned RemoveKinds = expandGroups(parseArgValues(D, Arg, false));
+ Mask &= ~RemoveKinds;
+ }
}
llvm_unreachable("arg list didn't provide expected value");
}
-std::string SanitizerArgs::describeSanitizeArg(const llvm::opt::ArgList &Args,
- const llvm::opt::Arg *A,
- unsigned Mask) {
- if (!A->getOption().matches(options::OPT_fsanitize_EQ))
- return A->getAsString(Args);
+std::string describeSanitizeArg(const llvm::opt::Arg *A, unsigned Mask) {
+ assert(A->getOption().matches(options::OPT_fsanitize_EQ)
+ && "Invalid argument in describeSanitizerArg!");
std::string Sanitizers;
for (unsigned I = 0, N = A->getNumValues(); I != N; ++I) {
- if (expandGroups(parse(A->getValue(I))) & Mask) {
+ if (expandGroups(parseValue(A->getValue(I))) & Mask) {
if (!Sanitizers.empty())
Sanitizers += ",";
Sanitizers += A->getValue(I);
@@ -315,24 +520,3 @@ std::string SanitizerArgs::describeSanitizeArg(const llvm::opt::ArgList &Args,
assert(!Sanitizers.empty() && "arg didn't provide expected value");
return "-fsanitize=" + Sanitizers;
}
-
-bool SanitizerArgs::getDefaultBlacklistForKind(const Driver &D, unsigned Kind,
- std::string &BLPath) {
- const char *BlacklistFile = nullptr;
- if (Kind & NeedsAsanRt)
- BlacklistFile = "asan_blacklist.txt";
- else if (Kind & NeedsMsanRt)
- BlacklistFile = "msan_blacklist.txt";
- else if (Kind & NeedsTsanRt)
- BlacklistFile = "tsan_blacklist.txt";
- else if (Kind & NeedsDfsanRt)
- BlacklistFile = "dfsan_abilist.txt";
-
- if (BlacklistFile) {
- SmallString<64> Path(D.ResourceDir);
- llvm::sys::path::append(Path, BlacklistFile);
- BLPath = Path.str();
- return true;
- }
- return false;
-}
diff --git a/lib/Driver/Tool.cpp b/lib/Driver/Tool.cpp
index b93864ff8bf2..7142e822f16e 100644
--- a/lib/Driver/Tool.cpp
+++ b/lib/Driver/Tool.cpp
@@ -11,11 +11,13 @@
using namespace clang::driver;
-Tool::Tool(const char *_Name, const char *_ShortName,
- const ToolChain &TC) : Name(_Name), ShortName(_ShortName),
- TheToolChain(TC)
-{
-}
+Tool::Tool(const char *_Name, const char *_ShortName, const ToolChain &TC,
+ ResponseFileSupport _ResponseSupport,
+ llvm::sys::WindowsEncodingMethod _ResponseEncoding,
+ const char *_ResponseFlag)
+ : Name(_Name), ShortName(_ShortName), TheToolChain(TC),
+ ResponseSupport(_ResponseSupport), ResponseEncoding(_ResponseEncoding),
+ ResponseFlag(_ResponseFlag) {}
Tool::~Tool() {
}
diff --git a/lib/Driver/ToolChain.cpp b/lib/Driver/ToolChain.cpp
index 4f90d083c7dc..2bcfecf6bde6 100644
--- a/lib/Driver/ToolChain.cpp
+++ b/lib/Driver/ToolChain.cpp
@@ -27,8 +27,13 @@ using namespace clang;
using namespace llvm::opt;
ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
- const ArgList &A)
- : D(D), Triple(T), Args(A) {
+ const ArgList &Args)
+ : D(D), Triple(T), Args(Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_mthread_model))
+ if (!isThreadModelSupported(A->getValue()))
+ D.Diag(diag::err_drv_invalid_thread_model_for_target)
+ << A->getValue()
+ << A->getAsString(Args);
}
ToolChain::~ToolChain() {
@@ -50,7 +55,7 @@ const SanitizerArgs& ToolChain::getSanitizerArgs() const {
return *SanitizerArguments.get();
}
-std::string ToolChain::getDefaultUniversalArchName() const {
+StringRef ToolChain::getDefaultUniversalArchName() const {
// In universal driver terms, the arch name accepted by -arch isn't exactly
// the same as the ones that appear in the triple. Roughly speaking, this is
// an inverse of the darwin::getArchTypeForDarwinArchName() function, but the
@@ -124,6 +129,7 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::AnalyzeJobClass:
case Action::MigrateJobClass:
case Action::VerifyPCHJobClass:
+ case Action::BackendJobClass:
return getClang();
}
@@ -201,6 +207,19 @@ ObjCRuntime ToolChain::getDefaultObjCRuntime(bool isNonFragile) const {
VersionTuple());
}
+bool ToolChain::isThreadModelSupported(const StringRef Model) const {
+ if (Model == "single") {
+ // FIXME: 'single' is only supported on ARM so far.
+ return Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::armeb ||
+ Triple.getArch() == llvm::Triple::thumb ||
+ Triple.getArch() == llvm::Triple::thumbeb;
+ } else if (Model == "posix")
+ return true;
+
+ return false;
+}
+
std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
types::ID InputType) const {
switch (getTriple().getArch()) {
@@ -221,6 +240,17 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
}
return Triple.getTriple();
}
+ case llvm::Triple::aarch64: {
+ llvm::Triple Triple = getTriple();
+ if (!Triple.isOSBinFormatMachO())
+ return getTripleString();
+
+ // FIXME: older versions of ld64 expect the "arm64" component in the actual
+ // triple string and query it to determine whether an LTO file can be
+ // handled. Remove this when we don't care any more.
+ Triple.setArchName("arm64");
+ return Triple.getTriple();
+ }
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
diff --git a/lib/Driver/ToolChains.cpp b/lib/Driver/ToolChains.cpp
index b46f69de96a3..4d97ab3bf485 100644
--- a/lib/Driver/ToolChains.cpp
+++ b/lib/Driver/ToolChains.cpp
@@ -133,11 +133,11 @@ static const char *GetArmArchForMCpu(StringRef Value) {
.Case("xscale", "xscale")
.Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "arm1176jzf-s", "armv6")
.Case("cortex-m0", "armv6m")
- .Cases("cortex-a5", "cortex-a7", "cortex-a8", "cortex-a9-mp", "armv7")
- .Cases("cortex-a9", "cortex-a12", "cortex-a15", "krait", "armv7")
+ .Cases("cortex-a5", "cortex-a7", "cortex-a8", "armv7")
+ .Cases("cortex-a9", "cortex-a12", "cortex-a15", "cortex-a17", "krait", "armv7")
.Cases("cortex-r4", "cortex-r5", "armv7r")
.Case("cortex-m3", "armv7m")
- .Case("cortex-m4", "armv7em")
+ .Cases("cortex-m4", "cortex-m7", "armv7em")
.Case("swift", "armv7s")
.Default(nullptr);
}
@@ -147,7 +147,7 @@ static bool isSoftFloatABI(const ArgList &Args) {
options::OPT_mfloat_abi_EQ);
if (!A)
return false;
-
+
return A->getOption().matches(options::OPT_msoft_float) ||
(A->getOption().matches(options::OPT_mfloat_abi_EQ) &&
A->getValue() == StringRef("soft"));
@@ -156,7 +156,10 @@ static bool isSoftFloatABI(const ArgList &Args) {
StringRef MachO::getMachOArchName(const ArgList &Args) const {
switch (getTriple().getArch()) {
default:
- return getArchName();
+ return getDefaultUniversalArchName();
+
+ case llvm::Triple::aarch64:
+ return "arm64";
case llvm::Triple::thumb:
case llvm::Triple::arm: {
@@ -288,17 +291,37 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
}
void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
- StringRef DarwinStaticLib, bool AlwaysLink,
- bool IsEmbedded) const {
- SmallString<128> P(getDriver().ResourceDir);
- llvm::sys::path::append(P, "lib", IsEmbedded ? "macho_embedded" : "darwin",
- DarwinStaticLib);
+ StringRef DarwinLibName, bool AlwaysLink,
+ bool IsEmbedded, bool AddRPath) const {
+ SmallString<128> Dir(getDriver().ResourceDir);
+ llvm::sys::path::append(Dir, "lib", IsEmbedded ? "macho_embedded" : "darwin");
+
+ SmallString<128> P(Dir);
+ llvm::sys::path::append(P, DarwinLibName);
// For now, allow missing resource libraries to support developers who may
// not have compiler-rt checked out or integrated into their build (unless
// we explicitly force linking with this library).
if (AlwaysLink || llvm::sys::fs::exists(P.str()))
CmdArgs.push_back(Args.MakeArgString(P.str()));
+
+ // Adding the rpaths might negatively interact when other rpaths are involved,
+ // so we should make sure we add the rpaths last, after all user-specified
+ // rpaths. That is currently the case at this call site, but we need to be
+ // careful if this function is ever called before the user's rpaths are emitted.
+ if (AddRPath) {
+ assert(DarwinLibName.endswith(".dylib") && "must be a dynamic library");
+
+ // Add @executable_path to rpath to support having the dylib copied with
+ // the executable.
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back("@executable_path");
+
+ // Add the path to the resource dir to rpath to support using the dylib
+ // from the default location without copying.
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back(Args.MakeArgString(Dir.str()));
+ }
}
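
The AddRPath branch above appends two -rpath entries after all user-specified rpaths, so a dynamic runtime library is found either next to the executable or in the resource directory; a sketch with an illustrative resource path:

#include <string>
#include <vector>

// Append "-rpath @executable_path" and "-rpath <resource lib dir>" to the
// linker argument list, after everything already emitted.
static void addRuntimeDylibRPaths(std::vector<std::string> &CmdArgs,
                                  const std::string &ResourceLibDir) {
  CmdArgs.push_back("-rpath");
  CmdArgs.push_back("@executable_path");
  CmdArgs.push_back("-rpath");
  CmdArgs.push_back(ResourceLibDir);
}

int main() {
  std::vector<std::string> CmdArgs = {"-lSystem"};  // stand-in for earlier args
  addRuntimeDylibRPaths(CmdArgs, "/usr/lib/clang/3.6.0/lib/darwin");
  return CmdArgs.size() == 5 ? 0 : 1;
}
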
void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
@@ -330,7 +353,8 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
}
// If we are building profile support, link that library in.
- if (Args.hasArg(options::OPT_fprofile_arcs) ||
+ if (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false) ||
Args.hasArg(options::OPT_fprofile_generate) ||
Args.hasArg(options::OPT_fprofile_instr_generate) ||
Args.hasArg(options::OPT_fcreate_profile) ||
@@ -375,12 +399,14 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
if (isTargetMacOS()) {
AddLinkRuntimeLib(Args, CmdArgs,
"libclang_rt.asan_osx_dynamic.dylib",
- true);
+ /*AlwaysLink*/ true, /*IsEmbedded*/ false,
+ /*AddRPath*/ true);
} else {
if (isTargetIOSSimulator()) {
AddLinkRuntimeLib(Args, CmdArgs,
"libclang_rt.asan_iossim_dynamic.dylib",
- true);
+ /*AlwaysLink*/ true, /*IsEmbedded*/ false,
+ /*AddRPath*/ true);
}
}
}
@@ -396,8 +422,7 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
// it never went into the SDK.
// Linking against libgcc_s.1 isn't needed for iOS 5.0+
if (isIPhoneOSVersionLT(5, 0) && !isTargetIOSSimulator() &&
- (getTriple().getArch() != llvm::Triple::arm64 &&
- getTriple().getArch() != llvm::Triple::aarch64))
+ getTriple().getArch() != llvm::Triple::aarch64)
CmdArgs.push_back("-lgcc_s.1");
// We currently always need a static runtime library for iOS.
@@ -453,31 +478,21 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
Arg *OSXVersion = Args.getLastArg(options::OPT_mmacosx_version_min_EQ);
Arg *iOSVersion = Args.getLastArg(options::OPT_miphoneos_version_min_EQ);
- Arg *iOSSimVersion = Args.getLastArg(
- options::OPT_mios_simulator_version_min_EQ);
- if (OSXVersion && (iOSVersion || iOSSimVersion)) {
+ if (OSXVersion && iOSVersion) {
getDriver().Diag(diag::err_drv_argument_not_allowed_with)
<< OSXVersion->getAsString(Args)
- << (iOSVersion ? iOSVersion : iOSSimVersion)->getAsString(Args);
- iOSVersion = iOSSimVersion = nullptr;
- } else if (iOSVersion && iOSSimVersion) {
- getDriver().Diag(diag::err_drv_argument_not_allowed_with)
- << iOSVersion->getAsString(Args)
- << iOSSimVersion->getAsString(Args);
- iOSSimVersion = nullptr;
- } else if (!OSXVersion && !iOSVersion && !iOSSimVersion) {
+ << iOSVersion->getAsString(Args);
+ iOSVersion = nullptr;
+ } else if (!OSXVersion && !iOSVersion) {
// If no deployment target was specified on the command line, check for
// environment defines.
StringRef OSXTarget;
StringRef iOSTarget;
- StringRef iOSSimTarget;
if (char *env = ::getenv("MACOSX_DEPLOYMENT_TARGET"))
OSXTarget = env;
if (char *env = ::getenv("IPHONEOS_DEPLOYMENT_TARGET"))
iOSTarget = env;
- if (char *env = ::getenv("IOS_SIMULATOR_DEPLOYMENT_TARGET"))
- iOSSimTarget = env;
// If no '-miphoneos-version-min' specified on the command line and
// IPHONEOS_DEPLOYMENT_TARGET is not defined, see if we can set the default
@@ -500,23 +515,10 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
MachOArchName == "arm64"))
iOSTarget = iOSVersionMin;
- // Handle conflicting deployment targets
- //
- // FIXME: Don't hardcode default here.
-
- // Do not allow conflicts with the iOS simulator target.
- if (!iOSSimTarget.empty() && (!OSXTarget.empty() || !iOSTarget.empty())) {
- getDriver().Diag(diag::err_drv_conflicting_deployment_targets)
- << "IOS_SIMULATOR_DEPLOYMENT_TARGET"
- << (!OSXTarget.empty() ? "MACOSX_DEPLOYMENT_TARGET" :
- "IPHONEOS_DEPLOYMENT_TARGET");
- }
-
// Allow conflicts among OSX and iOS for historical reasons, but choose the
// default platform.
if (!OSXTarget.empty() && !iOSTarget.empty()) {
if (getTriple().getArch() == llvm::Triple::arm ||
- getTriple().getArch() == llvm::Triple::arm64 ||
getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::thumb)
OSXTarget = "";
@@ -532,11 +534,6 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
const Option O = Opts.getOption(options::OPT_miphoneos_version_min_EQ);
iOSVersion = Args.MakeJoinedArg(nullptr, O, iOSTarget);
Args.append(iOSVersion);
- } else if (!iOSSimTarget.empty()) {
- const Option O = Opts.getOption(
- options::OPT_mios_simulator_version_min_EQ);
- iOSSimVersion = Args.MakeJoinedArg(nullptr, O, iOSSimTarget);
- Args.append(iOSSimVersion);
} else if (MachOArchName != "armv6m" && MachOArchName != "armv7m" &&
MachOArchName != "armv7em") {
// Otherwise, assume we are targeting OS X.
@@ -551,43 +548,30 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
Platform = MacOS;
else if (iOSVersion)
Platform = IPhoneOS;
- else if (iOSSimVersion)
- Platform = IPhoneOSSimulator;
else
llvm_unreachable("Unable to infer Darwin variant");
- // Reject invalid architecture combinations.
- if (iOSSimVersion && (getTriple().getArch() != llvm::Triple::x86 &&
- getTriple().getArch() != llvm::Triple::x86_64)) {
- getDriver().Diag(diag::err_drv_invalid_arch_for_deployment_target)
- << getTriple().getArchName() << iOSSimVersion->getAsString(Args);
- }
-
// Set the tool chain target information.
unsigned Major, Minor, Micro;
bool HadExtra;
if (Platform == MacOS) {
- assert((!iOSVersion && !iOSSimVersion) && "Unknown target platform!");
+ assert(!iOSVersion && "Unknown target platform!");
if (!Driver::GetReleaseVersion(OSXVersion->getValue(), Major, Minor,
Micro, HadExtra) || HadExtra ||
Major != 10 || Minor >= 100 || Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
<< OSXVersion->getAsString(Args);
- } else if (Platform == IPhoneOS || Platform == IPhoneOSSimulator) {
- const Arg *Version = iOSVersion ? iOSVersion : iOSSimVersion;
- assert(Version && "Unknown target platform!");
- if (!Driver::GetReleaseVersion(Version->getValue(), Major, Minor,
+ } else if (Platform == IPhoneOS) {
+ assert(iOSVersion && "Unknown target platform!");
+ if (!Driver::GetReleaseVersion(iOSVersion->getValue(), Major, Minor,
Micro, HadExtra) || HadExtra ||
Major >= 10 || Minor >= 100 || Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
- << Version->getAsString(Args);
+ << iOSVersion->getAsString(Args);
} else
llvm_unreachable("unknown kind of Darwin platform");
- // In GCC, the simulator historically was treated as being OS X in some
- // contexts, like determining the link logic, despite generally being called
- // with an iOS deployment target. For compatibility, we detect the
- // simulator as iOS + x86, and treat it differently in a few contexts.
+ // Recognize iOS targets with an x86 architecture as the iOS simulator.
if (iOSVersion && (getTriple().getArch() == llvm::Triple::x86 ||
getTriple().getArch() == llvm::Triple::x86_64))
Platform = IPhoneOSSimulator;
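// --- Illustrative sketch, not part of the imported diff ---------------------
// After this hunk the simulator is no longer a separately specified target:
// it is inferred as "an iOS deployment target on an x86 CPU".  A condensed
// model of that inference (enum and helper names here are simplified
// stand-ins, not the driver's real types):
#include <string>

enum class DarwinPlatform { MacOS, IPhoneOS, IPhoneOSSimulator };

static DarwinPlatform inferPlatform(bool HasOSXVersion, bool HasIOSVersion,
                                    const std::string &Arch) {
  DarwinPlatform P =
      HasOSXVersion ? DarwinPlatform::MacOS : DarwinPlatform::IPhoneOS;
  // -miphoneos-version-min combined with an x86 arch now means "simulator".
  if (HasIOSVersion && (Arch == "i386" || Arch == "x86_64"))
    P = DarwinPlatform::IPhoneOSSimulator;
  return P;
}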
@@ -653,7 +637,6 @@ void DarwinClang::AddCCKextLibArgs(const ArgList &Args,
// Use the newer cc_kext for iOS ARM after 6.0.
if (!isTargetIPhoneOS() || isTargetIOSSimulator() ||
- getTriple().getArch() == llvm::Triple::arm64 ||
getTriple().getArch() == llvm::Triple::aarch64 ||
!isIPhoneOSVersionLT(6, 0)) {
llvm::sys::path::append(P, "libclang_rt.cc_kext.a");
@@ -919,9 +902,7 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
// FIXME: It would be far better to avoid inserting those -static arguments,
// but we can't check the deployment target in the translation code until
// it is set here.
- if (isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0) &&
- getTriple().getArch() != llvm::Triple::arm64 &&
- getTriple().getArch() != llvm::Triple::aarch64) {
+ if (isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0)) {
for (ArgList::iterator it = DAL->begin(), ie = DAL->end(); it != ie; ) {
Arg *A = *it;
++it;
@@ -988,7 +969,6 @@ bool MachO::isPIEDefault() const {
bool MachO::isPICDefaultForced() const {
return (getArch() == llvm::Triple::x86_64 ||
- getArch() == llvm::Triple::arm64 ||
getArch() == llvm::Triple::aarch64);
}
@@ -1001,14 +981,7 @@ void Darwin::addMinVersionArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
VersionTuple TargetVersion = getTargetVersion();
- // If we had an explicit -mios-simulator-version-min argument, honor that,
- // otherwise use the traditional deployment targets. We can't just check the
- // is-sim attribute because existing code follows this path, and the linker
- // may not handle the argument.
- //
- // FIXME: We may be able to remove this, once we can verify no one depends on
- // it.
- if (Args.hasArg(options::OPT_mios_simulator_version_min_EQ))
+ if (isTargetIOSSimulator())
CmdArgs.push_back("-ios_simulator_version_min");
else if (isTargetIOSBased())
CmdArgs.push_back("-iphoneos_version_min");
@@ -1078,8 +1051,7 @@ void Darwin::addStartObjectFileArgs(const llvm::opt::ArgList &Args,
if (isTargetIOSSimulator()) {
; // iOS simulator does not need crt1.o.
} else if (isTargetIPhoneOS()) {
- if (getArch() == llvm::Triple::arm64 ||
- getArch() == llvm::Triple::aarch64)
+ if (getArch() == llvm::Triple::aarch64)
; // iOS does not need any crt1 files for arm64
else if (isIPhoneOSVersionLT(3, 1))
CmdArgs.push_back("-lcrt1.o");
@@ -1222,8 +1194,8 @@ Generic_GCC::GCCInstallationDetector::init(
// The library directories which may contain GCC installations.
SmallVector<StringRef, 4> CandidateLibDirs, CandidateBiarchLibDirs;
// The compatible GCC triples for this particular architecture.
- SmallVector<StringRef, 10> CandidateTripleAliases;
- SmallVector<StringRef, 10> CandidateBiarchTripleAliases;
+ SmallVector<StringRef, 16> CandidateTripleAliases;
+ SmallVector<StringRef, 16> CandidateBiarchTripleAliases;
CollectLibDirsAndTriples(TargetTriple, BiarchVariantTriple, CandidateLibDirs,
CandidateTripleAliases, CandidateBiarchLibDirs,
CandidateBiarchTripleAliases);
@@ -1397,176 +1369,116 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
"s390x-suse-linux", "s390x-redhat-linux"
};
+ using std::begin;
+ using std::end;
+
switch (TargetTriple.getArch()) {
- case llvm::Triple::arm64:
case llvm::Triple::aarch64:
- LibDirs.append(AArch64LibDirs,
- AArch64LibDirs + llvm::array_lengthof(AArch64LibDirs));
- TripleAliases.append(AArch64Triples,
- AArch64Triples + llvm::array_lengthof(AArch64Triples));
- BiarchLibDirs.append(AArch64LibDirs,
- AArch64LibDirs + llvm::array_lengthof(AArch64LibDirs));
- BiarchTripleAliases.append(
- AArch64Triples, AArch64Triples + llvm::array_lengthof(AArch64Triples));
+ LibDirs.append(begin(AArch64LibDirs), end(AArch64LibDirs));
+ TripleAliases.append(begin(AArch64Triples), end(AArch64Triples));
+ BiarchLibDirs.append(begin(AArch64LibDirs), end(AArch64LibDirs));
+ BiarchTripleAliases.append(begin(AArch64Triples), end(AArch64Triples));
break;
- case llvm::Triple::arm64_be:
case llvm::Triple::aarch64_be:
- LibDirs.append(AArch64beLibDirs,
- AArch64beLibDirs + llvm::array_lengthof(AArch64beLibDirs));
- TripleAliases.append(AArch64beTriples,
- AArch64beTriples + llvm::array_lengthof(AArch64beTriples));
- BiarchLibDirs.append(AArch64beLibDirs,
- AArch64beLibDirs + llvm::array_lengthof(AArch64beLibDirs));
- BiarchTripleAliases.append(
- AArch64beTriples, AArch64beTriples + llvm::array_lengthof(AArch64beTriples));
+ LibDirs.append(begin(AArch64beLibDirs), end(AArch64beLibDirs));
+ TripleAliases.append(begin(AArch64beTriples), end(AArch64beTriples));
+ BiarchLibDirs.append(begin(AArch64beLibDirs), end(AArch64beLibDirs));
+ BiarchTripleAliases.append(begin(AArch64beTriples), end(AArch64beTriples));
break;
case llvm::Triple::arm:
case llvm::Triple::thumb:
- LibDirs.append(ARMLibDirs, ARMLibDirs + llvm::array_lengthof(ARMLibDirs));
+ LibDirs.append(begin(ARMLibDirs), end(ARMLibDirs));
if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
- TripleAliases.append(ARMHFTriples,
- ARMHFTriples + llvm::array_lengthof(ARMHFTriples));
+ TripleAliases.append(begin(ARMHFTriples), end(ARMHFTriples));
} else {
- TripleAliases.append(ARMTriples,
- ARMTriples + llvm::array_lengthof(ARMTriples));
+ TripleAliases.append(begin(ARMTriples), end(ARMTriples));
}
break;
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- LibDirs.append(ARMebLibDirs, ARMebLibDirs + llvm::array_lengthof(ARMebLibDirs));
+ LibDirs.append(begin(ARMebLibDirs), end(ARMebLibDirs));
if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
- TripleAliases.append(ARMebHFTriples,
- ARMebHFTriples + llvm::array_lengthof(ARMebHFTriples));
+ TripleAliases.append(begin(ARMebHFTriples), end(ARMebHFTriples));
} else {
- TripleAliases.append(ARMebTriples,
- ARMebTriples + llvm::array_lengthof(ARMebTriples));
+ TripleAliases.append(begin(ARMebTriples), end(ARMebTriples));
}
break;
case llvm::Triple::x86_64:
- LibDirs.append(X86_64LibDirs,
- X86_64LibDirs + llvm::array_lengthof(X86_64LibDirs));
- TripleAliases.append(X86_64Triples,
- X86_64Triples + llvm::array_lengthof(X86_64Triples));
- // x32 is always available when x86_64 is available, so adding it as secondary
- // arch with x86_64 triples
+ LibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
+ TripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
+ // x32 is always available when x86_64 is available, so adding it as
+ // secondary arch with x86_64 triples
if (TargetTriple.getEnvironment() == llvm::Triple::GNUX32) {
- BiarchLibDirs.append(X32LibDirs,
- X32LibDirs + llvm::array_lengthof(X32LibDirs));
- BiarchTripleAliases.append(X86_64Triples,
- X86_64Triples + llvm::array_lengthof(X86_64Triples));
+ BiarchLibDirs.append(begin(X32LibDirs), end(X32LibDirs));
+ BiarchTripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
} else {
- BiarchLibDirs.append(X86LibDirs,
- X86LibDirs + llvm::array_lengthof(X86LibDirs));
- BiarchTripleAliases.append(X86Triples,
- X86Triples + llvm::array_lengthof(X86Triples));
+ BiarchLibDirs.append(begin(X86LibDirs), end(X86LibDirs));
+ BiarchTripleAliases.append(begin(X86Triples), end(X86Triples));
}
break;
case llvm::Triple::x86:
- LibDirs.append(X86LibDirs, X86LibDirs + llvm::array_lengthof(X86LibDirs));
- TripleAliases.append(X86Triples,
- X86Triples + llvm::array_lengthof(X86Triples));
- BiarchLibDirs.append(X86_64LibDirs,
- X86_64LibDirs + llvm::array_lengthof(X86_64LibDirs));
- BiarchTripleAliases.append(
- X86_64Triples, X86_64Triples + llvm::array_lengthof(X86_64Triples));
+ LibDirs.append(begin(X86LibDirs), end(X86LibDirs));
+ TripleAliases.append(begin(X86Triples), end(X86Triples));
+ BiarchLibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
+ BiarchTripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
break;
case llvm::Triple::mips:
- LibDirs.append(MIPSLibDirs,
- MIPSLibDirs + llvm::array_lengthof(MIPSLibDirs));
- TripleAliases.append(MIPSTriples,
- MIPSTriples + llvm::array_lengthof(MIPSTriples));
- BiarchLibDirs.append(MIPS64LibDirs,
- MIPS64LibDirs + llvm::array_lengthof(MIPS64LibDirs));
- BiarchTripleAliases.append(
- MIPS64Triples, MIPS64Triples + llvm::array_lengthof(MIPS64Triples));
+ LibDirs.append(begin(MIPSLibDirs), end(MIPSLibDirs));
+ TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ BiarchLibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
+ BiarchTripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
break;
case llvm::Triple::mipsel:
- LibDirs.append(MIPSELLibDirs,
- MIPSELLibDirs + llvm::array_lengthof(MIPSELLibDirs));
- TripleAliases.append(MIPSELTriples,
- MIPSELTriples + llvm::array_lengthof(MIPSELTriples));
- TripleAliases.append(MIPSTriples,
- MIPSTriples + llvm::array_lengthof(MIPSTriples));
- BiarchLibDirs.append(
- MIPS64ELLibDirs,
- MIPS64ELLibDirs + llvm::array_lengthof(MIPS64ELLibDirs));
- BiarchTripleAliases.append(
- MIPS64ELTriples,
- MIPS64ELTriples + llvm::array_lengthof(MIPS64ELTriples));
+ LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
+ TripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
+ TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
+ BiarchTripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
break;
case llvm::Triple::mips64:
- LibDirs.append(MIPS64LibDirs,
- MIPS64LibDirs + llvm::array_lengthof(MIPS64LibDirs));
- TripleAliases.append(MIPS64Triples,
- MIPS64Triples + llvm::array_lengthof(MIPS64Triples));
- BiarchLibDirs.append(MIPSLibDirs,
- MIPSLibDirs + llvm::array_lengthof(MIPSLibDirs));
- BiarchTripleAliases.append(MIPSTriples,
- MIPSTriples + llvm::array_lengthof(MIPSTriples));
+ LibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
+ TripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
+ BiarchLibDirs.append(begin(MIPSLibDirs), end(MIPSLibDirs));
+ BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
break;
case llvm::Triple::mips64el:
- LibDirs.append(MIPS64ELLibDirs,
- MIPS64ELLibDirs + llvm::array_lengthof(MIPS64ELLibDirs));
- TripleAliases.append(
- MIPS64ELTriples,
- MIPS64ELTriples + llvm::array_lengthof(MIPS64ELTriples));
- BiarchLibDirs.append(MIPSELLibDirs,
- MIPSELLibDirs + llvm::array_lengthof(MIPSELLibDirs));
- BiarchTripleAliases.append(
- MIPSELTriples, MIPSELTriples + llvm::array_lengthof(MIPSELTriples));
- BiarchTripleAliases.append(
- MIPSTriples, MIPSTriples + llvm::array_lengthof(MIPSTriples));
+ LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
+ TripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
+ BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
+ BiarchTripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
+ BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
break;
case llvm::Triple::ppc:
- LibDirs.append(PPCLibDirs, PPCLibDirs + llvm::array_lengthof(PPCLibDirs));
- TripleAliases.append(PPCTriples,
- PPCTriples + llvm::array_lengthof(PPCTriples));
- BiarchLibDirs.append(PPC64LibDirs,
- PPC64LibDirs + llvm::array_lengthof(PPC64LibDirs));
- BiarchTripleAliases.append(
- PPC64Triples, PPC64Triples + llvm::array_lengthof(PPC64Triples));
+ LibDirs.append(begin(PPCLibDirs), end(PPCLibDirs));
+ TripleAliases.append(begin(PPCTriples), end(PPCTriples));
+ BiarchLibDirs.append(begin(PPC64LibDirs), end(PPC64LibDirs));
+ BiarchTripleAliases.append(begin(PPC64Triples), end(PPC64Triples));
break;
case llvm::Triple::ppc64:
- LibDirs.append(PPC64LibDirs,
- PPC64LibDirs + llvm::array_lengthof(PPC64LibDirs));
- TripleAliases.append(PPC64Triples,
- PPC64Triples + llvm::array_lengthof(PPC64Triples));
- BiarchLibDirs.append(PPCLibDirs,
- PPCLibDirs + llvm::array_lengthof(PPCLibDirs));
- BiarchTripleAliases.append(PPCTriples,
- PPCTriples + llvm::array_lengthof(PPCTriples));
+ LibDirs.append(begin(PPC64LibDirs), end(PPC64LibDirs));
+ TripleAliases.append(begin(PPC64Triples), end(PPC64Triples));
+ BiarchLibDirs.append(begin(PPCLibDirs), end(PPCLibDirs));
+ BiarchTripleAliases.append(begin(PPCTriples), end(PPCTriples));
break;
case llvm::Triple::ppc64le:
- LibDirs.append(PPC64LELibDirs,
- PPC64LELibDirs + llvm::array_lengthof(PPC64LELibDirs));
- TripleAliases.append(PPC64LETriples,
- PPC64LETriples + llvm::array_lengthof(PPC64LETriples));
+ LibDirs.append(begin(PPC64LELibDirs), end(PPC64LELibDirs));
+ TripleAliases.append(begin(PPC64LETriples), end(PPC64LETriples));
break;
case llvm::Triple::sparc:
- LibDirs.append(SPARCv8LibDirs,
- SPARCv8LibDirs + llvm::array_lengthof(SPARCv8LibDirs));
- TripleAliases.append(SPARCv8Triples,
- SPARCv8Triples + llvm::array_lengthof(SPARCv8Triples));
- BiarchLibDirs.append(SPARCv9LibDirs,
- SPARCv9LibDirs + llvm::array_lengthof(SPARCv9LibDirs));
- BiarchTripleAliases.append(
- SPARCv9Triples, SPARCv9Triples + llvm::array_lengthof(SPARCv9Triples));
+ LibDirs.append(begin(SPARCv8LibDirs), end(SPARCv8LibDirs));
+ TripleAliases.append(begin(SPARCv8Triples), end(SPARCv8Triples));
+ BiarchLibDirs.append(begin(SPARCv9LibDirs), end(SPARCv9LibDirs));
+ BiarchTripleAliases.append(begin(SPARCv9Triples), end(SPARCv9Triples));
break;
case llvm::Triple::sparcv9:
- LibDirs.append(SPARCv9LibDirs,
- SPARCv9LibDirs + llvm::array_lengthof(SPARCv9LibDirs));
- TripleAliases.append(SPARCv9Triples,
- SPARCv9Triples + llvm::array_lengthof(SPARCv9Triples));
- BiarchLibDirs.append(SPARCv8LibDirs,
- SPARCv8LibDirs + llvm::array_lengthof(SPARCv8LibDirs));
- BiarchTripleAliases.append(
- SPARCv8Triples, SPARCv8Triples + llvm::array_lengthof(SPARCv8Triples));
+ LibDirs.append(begin(SPARCv9LibDirs), end(SPARCv9LibDirs));
+ TripleAliases.append(begin(SPARCv9Triples), end(SPARCv9Triples));
+ BiarchLibDirs.append(begin(SPARCv8LibDirs), end(SPARCv8LibDirs));
+ BiarchTripleAliases.append(begin(SPARCv8Triples), end(SPARCv8Triples));
break;
case llvm::Triple::systemz:
- LibDirs.append(SystemZLibDirs,
- SystemZLibDirs + llvm::array_lengthof(SystemZLibDirs));
- TripleAliases.append(SystemZTriples,
- SystemZTriples + llvm::array_lengthof(SystemZTriples));
+ LibDirs.append(begin(SystemZLibDirs), end(SystemZLibDirs));
+ TripleAliases.append(begin(SystemZTriples), end(SystemZTriples));
break;
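// --- Illustrative sketch, not part of the imported diff ---------------------
// Every case above is the same mechanical cleanup: each
//   Vec.append(Arr, Arr + llvm::array_lengthof(Arr));
// becomes
//   Vec.append(std::begin(Arr), std::end(Arr));
// which denotes the same element range for a C array but avoids spelling the
// length by hand.  A standalone illustration using std::vector:
#include <iterator>
#include <vector>

static std::vector<const char *> collectAArch64Triples() {
  static const char *const Triples[] = {"aarch64-linux-gnu",
                                        "aarch64-none-linux-gnu"};
  std::vector<const char *> Out;
  // Same range as (Triples, Triples + array_lengthof(Triples)).
  Out.insert(Out.end(), std::begin(Triples), std::end(Triples));
  return Out;
}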
default:
@@ -1645,6 +1557,10 @@ struct DetectedMultilibs {
llvm::Optional<Multilib> BiarchSibling;
};
+static Multilib makeMultilib(StringRef commonSuffix) {
+ return Multilib(commonSuffix, commonSuffix, commonSuffix);
+}
+
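// --- Illustrative sketch, not part of the imported diff ---------------------
// makeMultilib() collapses the common case where the GCC, OS and include
// suffixes are identical, so
//   Multilib().gccSuffix("/mips32").osSuffix("/mips32").includeSuffix("/mips32")
// shrinks to makeMultilib("/mips32") while the .flag() chain stays the same.
// A stripped-down stand-in showing the builder pattern (this ToyMultilib is a
// toy, not the driver's real Multilib class):
#include <string>
#include <utility>
#include <vector>

struct ToyMultilib {
  std::string GCCSuffix, OSSuffix, IncludeSuffix;
  std::vector<std::string> Flags;
  ToyMultilib &flag(std::string F) {
    Flags.push_back(std::move(F));
    return *this; // returning *this is what makes .flag().flag() chains work
  }
};

static ToyMultilib makeToyMultilib(const std::string &CommonSuffix) {
  return ToyMultilib{CommonSuffix, CommonSuffix, CommonSuffix, {}};
}

// Usage: auto M = makeToyMultilib("/mips32").flag("+m32").flag("-m64");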
static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
const llvm::opt::ArgList &Args,
DetectedMultilibs &Result) {
@@ -1678,69 +1594,46 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
// Check for FSF toolchain multilibs
MultilibSet FSFMipsMultilibs;
{
- Multilib MArchMips32 = Multilib()
- .gccSuffix("/mips32")
- .osSuffix("/mips32")
- .includeSuffix("/mips32")
+ auto MArchMips32 = makeMultilib("/mips32")
.flag("+m32").flag("-m64").flag("-mmicromips").flag("+march=mips32");
- Multilib MArchMicroMips = Multilib()
- .gccSuffix("/micromips")
- .osSuffix("/micromips")
- .includeSuffix("/micromips")
+ auto MArchMicroMips = makeMultilib("/micromips")
.flag("+m32").flag("-m64").flag("+mmicromips");
- Multilib MArchMips64r2 = Multilib()
- .gccSuffix("/mips64r2")
- .osSuffix("/mips64r2")
- .includeSuffix("/mips64r2")
+ auto MArchMips64r2 = makeMultilib("/mips64r2")
.flag("-m32").flag("+m64").flag("+march=mips64r2");
- Multilib MArchMips64 = Multilib()
- .gccSuffix("/mips64")
- .osSuffix("/mips64")
- .includeSuffix("/mips64")
+ auto MArchMips64 = makeMultilib("/mips64")
.flag("-m32").flag("+m64").flag("-march=mips64r2");
- Multilib MArchDefault = Multilib()
+ auto MArchDefault = makeMultilib("")
.flag("+m32").flag("-m64").flag("-mmicromips").flag("+march=mips32r2");
- Multilib Mips16 = Multilib()
- .gccSuffix("/mips16")
- .osSuffix("/mips16")
- .includeSuffix("/mips16")
+ auto Mips16 = makeMultilib("/mips16")
.flag("+mips16");
- Multilib MAbi64 = Multilib()
- .gccSuffix("/64")
- .osSuffix("/64")
- .includeSuffix("/64")
+ auto UCLibc = makeMultilib("/uclibc")
+ .flag("+muclibc");
+
+ auto MAbi64 = makeMultilib("/64")
.flag("+mabi=n64").flag("-mabi=n32").flag("-m32");
- Multilib BigEndian = Multilib()
+ auto BigEndian = makeMultilib("")
.flag("+EB").flag("-EL");
- Multilib LittleEndian = Multilib()
- .gccSuffix("/el")
- .osSuffix("/el")
- .includeSuffix("/el")
+ auto LittleEndian = makeMultilib("/el")
.flag("+EL").flag("-EB");
- Multilib SoftFloat = Multilib()
- .gccSuffix("/sof")
- .osSuffix("/sof")
- .includeSuffix("/sof")
+ auto SoftFloat = makeMultilib("/sof")
.flag("+msoft-float");
- Multilib Nan2008 = Multilib()
- .gccSuffix("/nan2008")
- .osSuffix("/nan2008")
- .includeSuffix("/nan2008")
+ auto Nan2008 = makeMultilib("/nan2008")
.flag("+mnan=2008");
FSFMipsMultilibs = MultilibSet()
.Either(MArchMips32, MArchMicroMips,
MArchMips64r2, MArchMips64, MArchDefault)
+ .Maybe(UCLibc)
.Maybe(Mips16)
.FilterOut("/mips64/mips16")
.FilterOut("/mips64r2/mips16")
@@ -1754,59 +1647,59 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
.Maybe(SoftFloat)
.Maybe(Nan2008)
.FilterOut(".*sof/nan2008")
- .FilterOut(NonExistent);
+ .FilterOut(NonExistent)
+ .setIncludeDirsCallback([](
+ StringRef InstallDir, StringRef TripleStr, const Multilib &M) {
+ std::vector<std::string> Dirs;
+ Dirs.push_back((InstallDir + "/include").str());
+ std::string SysRootInc = InstallDir.str() + "/../../../../sysroot";
+ if (StringRef(M.includeSuffix()).startswith("/uclibc"))
+ Dirs.push_back(SysRootInc + "/uclibc/usr/include");
+ else
+ Dirs.push_back(SysRootInc + "/usr/include");
+ return Dirs;
+ });
}
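// --- Illustrative sketch, not part of the imported diff ---------------------
// setIncludeDirsCallback() lets each MultilibSet describe its own extern "C"
// include layout: the callback receives the GCC install dir, the triple and
// the selected multilib, and returns the include directories to add.  A
// self-contained sketch of a callback with the same shape (std::function and
// a plain include-suffix string are simplifications; the real driver passes
// its own Multilib type):
#include <functional>
#include <string>
#include <vector>

using IncludeDirsFn = std::function<std::vector<std::string>(
    const std::string & /*InstallDir*/, const std::string & /*Triple*/,
    const std::string & /*IncludeSuffix*/)>;

static const IncludeDirsFn FSFStyleIncludes =
    [](const std::string &InstallDir, const std::string & /*Triple*/,
       const std::string &IncludeSuffix) {
      std::vector<std::string> Dirs{InstallDir + "/include"};
      const std::string SysRootInc = InstallDir + "/../../../../sysroot";
      // uclibc multilibs keep their headers under a separate sysroot subtree.
      if (IncludeSuffix.rfind("/uclibc", 0) == 0) // startswith("/uclibc")
        Dirs.push_back(SysRootInc + "/uclibc/usr/include");
      else
        Dirs.push_back(SysRootInc + "/usr/include");
      return Dirs;
    };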
// Check for Code Sourcery toolchain multilibs
MultilibSet CSMipsMultilibs;
{
- Multilib MArchMips16 = Multilib()
- .gccSuffix("/mips16")
- .osSuffix("/mips16")
- .includeSuffix("/mips16")
+ auto MArchMips16 = makeMultilib("/mips16")
.flag("+m32").flag("+mips16");
- Multilib MArchMicroMips = Multilib()
- .gccSuffix("/micromips")
- .osSuffix("/micromips")
- .includeSuffix("/micromips")
+ auto MArchMicroMips = makeMultilib("/micromips")
.flag("+m32").flag("+mmicromips");
- Multilib MArchDefault = Multilib()
+ auto MArchDefault = makeMultilib("")
.flag("-mips16").flag("-mmicromips");
- Multilib SoftFloat = Multilib()
- .gccSuffix("/soft-float")
- .osSuffix("/soft-float")
- .includeSuffix("/soft-float")
+ auto UCLibc = makeMultilib("/uclibc")
+ .flag("+muclibc");
+
+ auto SoftFloat = makeMultilib("/soft-float")
.flag("+msoft-float");
- Multilib Nan2008 = Multilib()
- .gccSuffix("/nan2008")
- .osSuffix("/nan2008")
- .includeSuffix("/nan2008")
+ auto Nan2008 = makeMultilib("/nan2008")
.flag("+mnan=2008");
- Multilib DefaultFloat = Multilib()
+ auto DefaultFloat = makeMultilib("")
.flag("-msoft-float").flag("-mnan=2008");
- Multilib BigEndian = Multilib()
+ auto BigEndian = makeMultilib("")
.flag("+EB").flag("-EL");
- Multilib LittleEndian = Multilib()
- .gccSuffix("/el")
- .osSuffix("/el")
- .includeSuffix("/el")
+ auto LittleEndian = makeMultilib("/el")
.flag("+EL").flag("-EB");
// Note that this one's osSuffix is ""
- Multilib MAbi64 = Multilib()
+ auto MAbi64 = makeMultilib("")
.gccSuffix("/64")
.includeSuffix("/64")
.flag("+mabi=n64").flag("-mabi=n32").flag("-m32");
CSMipsMultilibs = MultilibSet()
.Either(MArchMips16, MArchMicroMips, MArchDefault)
+ .Maybe(UCLibc)
.Either(SoftFloat, Nan2008, DefaultFloat)
.FilterOut("/micromips/nan2008")
.FilterOut("/mips16/nan2008")
@@ -1814,7 +1707,19 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
.Maybe(MAbi64)
.FilterOut("/mips16.*/64")
.FilterOut("/micromips.*/64")
- .FilterOut(NonExistent);
+ .FilterOut(NonExistent)
+ .setIncludeDirsCallback([](
+ StringRef InstallDir, StringRef TripleStr, const Multilib &M) {
+ std::vector<std::string> Dirs;
+ Dirs.push_back((InstallDir + "/include").str());
+ std::string SysRootInc =
+ InstallDir.str() + "/../../../../" + TripleStr.str();
+ if (StringRef(M.includeSuffix()).startswith("/uclibc"))
+ Dirs.push_back(SysRootInc + "/libc/uclibc/usr/include");
+ else
+ Dirs.push_back(SysRootInc + "/libc/usr/include");
+ return Dirs;
+ });
}
MultilibSet AndroidMipsMultilibs = MultilibSet()
@@ -1843,29 +1748,27 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
MultilibSet ImgMultilibs;
{
- Multilib Mips64r6 = Multilib()
- .gccSuffix("/mips64r6")
- .osSuffix("/mips64r6")
- .includeSuffix("/mips64r6")
+ auto Mips64r6 = makeMultilib("/mips64r6")
.flag("+m64").flag("-m32");
- Multilib LittleEndian = Multilib()
- .gccSuffix("/el")
- .osSuffix("/el")
- .includeSuffix("/el")
+ auto LittleEndian = makeMultilib("/el")
.flag("+EL").flag("-EB");
- Multilib MAbi64 = Multilib()
- .gccSuffix("/64")
- .osSuffix("/64")
- .includeSuffix("/64")
+ auto MAbi64 = makeMultilib("/64")
.flag("+mabi=n64").flag("-mabi=n32").flag("-m32");
ImgMultilibs = MultilibSet()
.Maybe(Mips64r6)
.Maybe(MAbi64)
.Maybe(LittleEndian)
- .FilterOut(NonExistent);
+ .FilterOut(NonExistent)
+ .setIncludeDirsCallback([](
+ StringRef InstallDir, StringRef TripleStr, const Multilib &M) {
+ std::vector<std::string> Dirs;
+ Dirs.push_back((InstallDir + "/include").str());
+ Dirs.push_back((InstallDir + "/../../../../sysroot/usr/include").str());
+ return Dirs;
+ });
}
StringRef CPUName;
@@ -1884,6 +1787,7 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
addMultilibFlag(CPUName == "mips64r2" || CPUName == "octeon",
"march=mips64r2", Flags);
addMultilibFlag(isMicroMips(Args), "mmicromips", Flags);
+ addMultilibFlag(tools::mips::isUCLibc(Args), "muclibc", Flags);
addMultilibFlag(tools::mips::isNaN2008(Args, TargetTriple), "mnan=2008",
Flags);
addMultilibFlag(ABIName == "n32", "mabi=n32", Flags);
@@ -2159,12 +2063,16 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
getTriple().getArch() == llvm::Triple::x86_64 ||
getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::aarch64_be ||
- getTriple().getArch() == llvm::Triple::arm64 ||
- getTriple().getArch() == llvm::Triple::arm64_be ||
getTriple().getArch() == llvm::Triple::arm ||
getTriple().getArch() == llvm::Triple::armeb ||
getTriple().getArch() == llvm::Triple::thumb ||
- getTriple().getArch() == llvm::Triple::thumbeb;
+ getTriple().getArch() == llvm::Triple::thumbeb ||
+ getTriple().getArch() == llvm::Triple::ppc ||
+ getTriple().getArch() == llvm::Triple::ppc64 ||
+ getTriple().getArch() == llvm::Triple::ppc64le ||
+ getTriple().getArch() == llvm::Triple::sparc ||
+ getTriple().getArch() == llvm::Triple::sparcv9 ||
+ getTriple().getArch() == llvm::Triple::systemz;
}
void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
@@ -2173,8 +2081,6 @@ void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
bool UseInitArrayDefault =
getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::aarch64_be ||
- getTriple().getArch() == llvm::Triple::arm64 ||
- getTriple().getArch() == llvm::Triple::arm64_be ||
(getTriple().getOS() == llvm::Triple::Linux &&
(!V.isOlderThan(4, 7, 0) ||
getTriple().getEnvironment() == llvm::Triple::Android));
@@ -2187,11 +2093,14 @@ void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
/// Hexagon Toolchain
-std::string Hexagon_TC::GetGnuDir(const std::string &InstalledDir) {
+std::string Hexagon_TC::GetGnuDir(const std::string &InstalledDir,
+ const ArgList &Args) {
// Locate the rest of the toolchain ...
- if (strlen(GCC_INSTALL_PREFIX))
- return std::string(GCC_INSTALL_PREFIX);
+ std::string GccToolchain = getGCCToolchainDir(Args);
+
+ if (!GccToolchain.empty())
+ return GccToolchain;
std::string InstallRelDir = InstalledDir + "/../../gnu";
if (llvm::sys::fs::exists(InstallRelDir))
@@ -2206,8 +2115,8 @@ std::string Hexagon_TC::GetGnuDir(const std::string &InstalledDir) {
static void GetHexagonLibraryPaths(
const ArgList &Args,
- const std::string Ver,
- const std::string MarchString,
+ const std::string &Ver,
+ const std::string &MarchString,
const std::string &InstalledDir,
ToolChain::path_list *LibPaths)
{
@@ -2231,7 +2140,7 @@ static void GetHexagonLibraryPaths(
const std::string MarchSuffix = "/" + MarchString;
const std::string G0Suffix = "/G0";
const std::string MarchG0Suffix = MarchSuffix + G0Suffix;
- const std::string RootDir = Hexagon_TC::GetGnuDir(InstalledDir) + "/";
+ const std::string RootDir = Hexagon_TC::GetGnuDir(InstalledDir, Args) + "/";
// lib/gcc/hexagon/...
std::string LibGCCHexagonDir = RootDir + "lib/gcc/hexagon/";
@@ -2259,7 +2168,7 @@ Hexagon_TC::Hexagon_TC(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: Linux(D, Triple, Args) {
const std::string InstalledDir(getDriver().getInstalledDir());
- const std::string GnuDir = Hexagon_TC::GetGnuDir(InstalledDir);
+ const std::string GnuDir = Hexagon_TC::GetGnuDir(InstalledDir, Args);
// Note: Generic_GCC::Generic_GCC adds InstalledDir and getDriver().Dir to
// program paths
@@ -2314,7 +2223,7 @@ void Hexagon_TC::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
std::string Ver(GetGCCLibAndIncVersion());
- std::string GnuDir = Hexagon_TC::GetGnuDir(D.InstalledDir);
+ std::string GnuDir = Hexagon_TC::GetGnuDir(D.InstalledDir, DriverArgs);
std::string HexagonDir(GnuDir + "/lib/gcc/hexagon/" + Ver);
addExternCSystemInclude(DriverArgs, CC1Args, HexagonDir + "/include");
addExternCSystemInclude(DriverArgs, CC1Args, HexagonDir + "/include-fixed");
@@ -2330,7 +2239,8 @@ void Hexagon_TC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
const Driver &D = getDriver();
std::string Ver(GetGCCLibAndIncVersion());
- SmallString<128> IncludeDir(Hexagon_TC::GetGnuDir(D.InstalledDir));
+ SmallString<128> IncludeDir(
+ Hexagon_TC::GetGnuDir(D.InstalledDir, DriverArgs));
llvm::sys::path::append(IncludeDir, "hexagon/include/c++/");
llvm::sys::path::append(IncludeDir, Ver);
@@ -2599,7 +2509,7 @@ bool FreeBSD::HasNativeLLVMSupport() const {
}
bool FreeBSD::isPIEDefault() const {
- return getSanitizerArgs().hasZeroBaseShadow();
+ return getSanitizerArgs().requiresPIE();
}
/// NetBSD - NetBSD tool chain which can call as(1) and ld(1) directly.
@@ -2623,11 +2533,13 @@ NetBSD::NetBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
case llvm::Triple::thumbeb:
switch (Triple.getEnvironment()) {
case llvm::Triple::EABI:
- case llvm::Triple::EABIHF:
case llvm::Triple::GNUEABI:
- case llvm::Triple::GNUEABIHF:
getFilePaths().push_back("=/usr/lib/eabi");
break;
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABIHF:
+ getFilePaths().push_back("=/usr/lib/eabihf");
+ break;
default:
getFilePaths().push_back("=/usr/lib/oabi");
break;
@@ -2640,6 +2552,9 @@ NetBSD::NetBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
else if (tools::mips::hasMipsAbiArg(Args, "64"))
getFilePaths().push_back("=/usr/lib/64");
break;
+ case llvm::Triple::ppc:
+ getFilePaths().push_back("=/usr/lib/powerpc");
+ break;
case llvm::Triple::sparc:
getFilePaths().push_back("=/usr/lib/sparc");
break;
@@ -2674,12 +2589,16 @@ NetBSD::GetCXXStdlibType(const ArgList &Args) const {
unsigned Major, Minor, Micro;
getTriple().getOSVersion(Major, Minor, Micro);
- if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 40) || Major == 0) {
+ if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 49) || Major == 0) {
switch (getArch()) {
+ case llvm::Triple::aarch64:
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
return ToolChain::CST_Libcxx;
@@ -2726,32 +2645,6 @@ Tool *Minix::buildLinker() const {
return new tools::minix::Link(*this);
}
-/// AuroraUX - AuroraUX tool chain which can call as(1) and ld(1) directly.
-
-AuroraUX::AuroraUX(const Driver &D, const llvm::Triple& Triple,
- const ArgList &Args)
- : Generic_GCC(D, Triple, Args) {
-
- getProgramPaths().push_back(getDriver().getInstalledDir());
- if (getDriver().getInstalledDir() != getDriver().Dir)
- getProgramPaths().push_back(getDriver().Dir);
-
- getFilePaths().push_back(getDriver().Dir + "/../lib");
- getFilePaths().push_back("/usr/lib");
- getFilePaths().push_back("/usr/sfw/lib");
- getFilePaths().push_back("/opt/gcc4/lib");
- getFilePaths().push_back("/opt/gcc4/lib/gcc/i386-pc-solaris2.11/4.2.4");
-
-}
-
-Tool *AuroraUX::buildAssembler() const {
- return new tools::auroraux::Assemble(*this);
-}
-
-Tool *AuroraUX::buildLinker() const {
- return new tools::auroraux::Link(*this);
-}
-
/// Solaris - Solaris tool chain which can call as(1) and ld(1) directly.
Solaris::Solaris(const Driver &D, const llvm::Triple& Triple,
@@ -2825,7 +2718,7 @@ static Distro DetectDistro(llvm::Triple::ArchType Arch) {
llvm::MemoryBuffer::getFile("/etc/lsb-release");
if (File) {
StringRef Data = File.get()->getBuffer();
- SmallVector<StringRef, 8> Lines;
+ SmallVector<StringRef, 16> Lines;
Data.split(Lines, "\n");
Distro Version = UnknownDistro;
for (unsigned i = 0, s = Lines.size(); i != s; ++i)
@@ -2939,12 +2832,10 @@ static std::string getMultiarchTriple(const llvm::Triple &TargetTriple,
llvm::sys::fs::exists(SysRoot + "/lib/x86_64-linux-gnu"))
return "x86_64-linux-gnu";
return TargetTriple.str();
- case llvm::Triple::arm64:
case llvm::Triple::aarch64:
if (llvm::sys::fs::exists(SysRoot + "/lib/aarch64-linux-gnu"))
return "aarch64-linux-gnu";
return TargetTriple.str();
- case llvm::Triple::arm64_be:
case llvm::Triple::aarch64_be:
if (llvm::sys::fs::exists(SysRoot + "/lib/aarch64_be-linux-gnu"))
return "aarch64_be-linux-gnu";
@@ -3115,7 +3006,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// installation that is *not* within the system root to ensure two things:
//
// 1) Any DSOs that are linked in from this tree or from the install path
- // above must be preasant on the system root and found via an
+ // above must be present on the system root and found via an
// appropriate rpath.
// 2) There must not be libraries installed into
// <prefix>/<triple>/<libdir> unless they should be preferred over
@@ -3272,20 +3163,16 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// Lacking those, try to detect the correct set of system includes for the
// target triple.
- // Sourcery CodeBench and modern FSF Mips toolchains put extern C
- // system includes under three additional directories.
- if (GCCInstallation.isValid() && isMipsArch(getTriple().getArch())) {
- addExternCSystemIncludeIfExists(
- DriverArgs, CC1Args, GCCInstallation.getInstallPath() + "/include");
-
- addExternCSystemIncludeIfExists(
- DriverArgs, CC1Args,
- GCCInstallation.getInstallPath() + "/../../../../" +
- GCCInstallation.getTriple().str() + "/libc/usr/include");
-
- addExternCSystemIncludeIfExists(
- DriverArgs, CC1Args,
- GCCInstallation.getInstallPath() + "/../../../../sysroot/usr/include");
+ // Add include directories specific to the selected multilib set and multilib.
+ if (GCCInstallation.isValid()) {
+ auto Callback = Multilibs.includeDirsCallback();
+ if (Callback) {
+ const auto IncludePaths = Callback(GCCInstallation.getInstallPath(),
+ GCCInstallation.getTriple().str(),
+ GCCInstallation.getMultilib());
+ for (const auto &Path : IncludePaths)
+ addExternCSystemIncludeIfExists(DriverArgs, CC1Args, Path);
+ }
}
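// --- Illustrative sketch, not part of the imported diff ---------------------
// This is the consumer side of the setIncludeDirsCallback() hook added in the
// MIPS multilib hunks above: instead of hard-coding each sysroot layout, the
// Linux toolchain asks the selected MultilibSet for its extern "C" include
// directories.  A minimal stand-in using the same callback shape as the
// earlier sketch (the real code then registers each existing directory with
// cc1 via addExternCSystemIncludeIfExists):
#include <functional>
#include <string>
#include <vector>

using MultilibIncludeDirsFn = std::function<std::vector<std::string>(
    const std::string &, const std::string &, const std::string &)>;

static std::vector<std::string>
multilibIncludeDirs(const MultilibIncludeDirsFn &Callback,
                    const std::string &InstallPath, const std::string &Triple,
                    const std::string &IncludeSuffix) {
  if (!Callback)
    return {}; // this multilib set registered no include-dir callback
  return Callback(InstallPath, Triple, IncludeSuffix);
}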
// Implement generic Debian multiarch support.
@@ -3344,9 +3231,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
} else if (getTriple().getArch() == llvm::Triple::x86) {
MultiarchIncludeDirs = X86MultiarchIncludeDirs;
} else if (getTriple().getArch() == llvm::Triple::aarch64 ||
- getTriple().getArch() == llvm::Triple::aarch64_be ||
- getTriple().getArch() == llvm::Triple::arm64 ||
- getTriple().getArch() == llvm::Triple::arm64_be) {
+ getTriple().getArch() == llvm::Triple::aarch64_be) {
MultiarchIncludeDirs = AArch64MultiarchIncludeDirs;
} else if (getTriple().getArch() == llvm::Triple::arm) {
if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
@@ -3500,7 +3385,7 @@ void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
}
bool Linux::isPIEDefault() const {
- return getSanitizerArgs().hasZeroBaseShadow();
+ return getSanitizerArgs().requiresPIE();
}
/// DragonFly - DragonFly tool chain which can call as(1) and ld(1) directly.
@@ -3586,7 +3471,8 @@ void XCore::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
void XCore::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdinc) ||
- DriverArgs.hasArg(options::OPT_nostdlibinc))
+ DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
return;
if (const char *cl_include_dir = getenv("XCC_CPLUS_INCLUDE_PATH")) {
SmallVector<StringRef, 4> Dirs;
diff --git a/lib/Driver/ToolChains.h b/lib/Driver/ToolChains.h
index b5df8668d69e..47fb10d3f6c5 100644
--- a/lib/Driver/ToolChains.h
+++ b/lib/Driver/ToolChains.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_LIB_DRIVER_TOOLCHAINS_H_
-#define CLANG_LIB_DRIVER_TOOLCHAINS_H_
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_H
#include "Tools.h"
#include "clang/Basic/VersionTuple.h"
@@ -234,9 +234,10 @@ public:
void AddLinkRuntimeLib(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
- StringRef DarwinStaticLib,
+ StringRef DarwinLibName,
bool AlwaysLink = false,
- bool IsEmbedded = false) const;
+ bool IsEmbedded = false,
+ bool AddRPath = false) const;
/// }
/// @name ToolChain Implementation
@@ -255,7 +256,7 @@ public:
bool IsBlocksDefault() const override {
// Always allow blocks on Apple; users interested in versioning are
- // expected to use /usr/include/Blocks.h.
+ // expected to use /usr/include/Block.h.
return true;
}
bool IsIntegratedAssemblerDefault() const override {
@@ -362,7 +363,7 @@ public:
bool isKernelStatic() const override {
return !isTargetIPhoneOS() || isIPhoneOSVersionLT(6, 0) ||
- getTriple().getArch() == llvm::Triple::arm64;
+ getTriple().getArch() == llvm::Triple::aarch64;
}
protected:
@@ -487,7 +488,8 @@ public:
AddCCKextLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
- virtual void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const;
+ virtual void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args)
+ const override;
void
AddLinkARCArgs(const llvm::opt::ArgList &Args,
@@ -506,16 +508,6 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
};
-class LLVM_LIBRARY_VISIBILITY AuroraUX : public Generic_GCC {
-public:
- AuroraUX(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
-
-protected:
- Tool *buildAssembler() const override;
- Tool *buildLinker() const override;
-};
-
class LLVM_LIBRARY_VISIBILITY Solaris : public Generic_GCC {
public:
Solaris(const Driver &D, const llvm::Triple &Triple,
@@ -542,14 +534,6 @@ public:
return 2;
}
- virtual bool IsIntegratedAssemblerDefault() const override {
- if (getTriple().getArch() == llvm::Triple::ppc ||
- getTriple().getArch() == llvm::Triple::sparc ||
- getTriple().getArch() == llvm::Triple::sparcv9)
- return true;
- return Generic_ELF::IsIntegratedAssemblerDefault();
- }
-
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
@@ -591,12 +575,6 @@ public:
void
AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- bool IsIntegratedAssemblerDefault() const override {
- if (getTriple().getArch() == llvm::Triple::ppc ||
- getTriple().getArch() == llvm::Triple::ppc64)
- return true;
- return Generic_ELF::IsIntegratedAssemblerDefault();
- }
bool UseSjLjExceptions() const override;
bool isPIEDefault() const override;
@@ -621,11 +599,6 @@ public:
bool IsUnwindTablesDefault() const override {
return true;
}
- bool IsIntegratedAssemblerDefault() const override {
- if (getTriple().getArch() == llvm::Triple::ppc)
- return true;
- return Generic_ELF::IsIntegratedAssemblerDefault();
- }
protected:
Tool *buildAssembler() const override;
@@ -709,7 +682,8 @@ public:
StringRef GetGCCLibAndIncVersion() const { return GCCLibAndIncVersion.Text; }
- static std::string GetGnuDir(const std::string &InstalledDir);
+ static std::string GetGnuDir(const std::string &InstalledDir,
+ const llvm::opt::ArgList &Args);
static StringRef GetTargetCPU(const llvm::opt::ArgList &Args);
};
@@ -728,10 +702,10 @@ public:
bool isPICDefaultForced() const override;
};
-class LLVM_LIBRARY_VISIBILITY Windows : public ToolChain {
+class LLVM_LIBRARY_VISIBILITY MSVCToolChain : public ToolChain {
public:
- Windows(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
+ MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
bool IsIntegratedAssemblerDefault() const override;
bool IsUnwindTablesDefault() const override;
@@ -746,11 +720,50 @@ public:
AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ bool getWindowsSDKDir(std::string &path, int &major, int &minor) const;
+ bool getWindowsSDKLibraryPath(std::string &path) const;
+ bool getVisualStudioInstallDir(std::string &path) const;
+ bool getVisualStudioBinariesFolder(const char *clangProgramPath,
+ std::string &path) const;
+
protected:
+ void AddSystemIncludeWithSubfolder(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ const std::string &folder,
+ const char *subfolder) const;
+
Tool *buildLinker() const override;
Tool *buildAssembler() const override;
};
+class LLVM_LIBRARY_VISIBILITY CrossWindowsToolChain : public Generic_GCC {
+public:
+ CrossWindowsToolChain(const Driver &D, const llvm::Triple &T,
+ const llvm::opt::ArgList &Args);
+
+ bool IsIntegratedAssemblerDefault() const override { return true; }
+ bool IsUnwindTablesDefault() const override;
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+
+ unsigned int GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ return 0;
+ }
+
+ void AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args)
+ const override;
+ void AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args)
+ const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+protected:
+ Tool *buildLinker() const override;
+ Tool *buildAssembler() const override;
+};
class LLVM_LIBRARY_VISIBILITY XCore : public ToolChain {
public:
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index 198e82e15369..d625c0e3e2f6 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -83,6 +83,23 @@ static void CheckCodeGenerationOptions(const Driver &D, const ArgList &Args) {
<< A->getAsString(Args) << "-static";
}
+// Add backslashes to escape spaces and other backslashes.
+// This is used for the space-separated argument list specified with
+// the -dwarf-debug-flags option.
+static void EscapeSpacesAndBackslashes(const char *Arg,
+ SmallVectorImpl<char> &Res) {
+ for ( ; *Arg; ++Arg) {
+ switch (*Arg) {
+ default: break;
+ case ' ':
+ case '\\':
+ Res.push_back('\\');
+ break;
+ }
+ Res.push_back(*Arg);
+ }
+}
+
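// --- Illustrative sketch, not part of the imported diff ---------------------
// EscapeSpacesAndBackslashes() prepares one argument for the space-separated
// -dwarf-debug-flags string, e.g. the (post-shell) argument
//   -DNAME=two words   becomes   -DNAME=two\ words
// A self-contained equivalent written with std::string for clarity:
#include <string>

static std::string escapeSpacesAndBackslashes(const std::string &Arg) {
  std::string Out;
  Out.reserve(Arg.size());
  for (char C : Arg) {
    if (C == ' ' || C == '\\')
      Out.push_back('\\'); // escape the separator and the escape char itself
    Out.push_back(C);
  }
  return Out;
}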
// Quote target names for inclusion in GNU Make dependency files.
// Only the characters '$', '#', ' ', '\t' are quoted.
static void QuoteTarget(StringRef Target,
@@ -457,10 +474,10 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) {
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- case llvm::Triple::arm64:
- case llvm::Triple::arm64_be:
case llvm::Triple::arm:
case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
if (Triple.isOSDarwin() || Triple.isOSWindows())
return true;
return false;
@@ -508,7 +525,7 @@ static void getARMHWDivFeatures(const Driver &D, const Arg *A,
} else
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
-
+
// Handle -mfpu=.
//
// FIXME: Centralize feature selection, defaulting shouldn't be also in the
@@ -546,6 +563,18 @@ static void getARMFPUFeatures(const Driver &D, const Arg *A,
Features.push_back("+d16");
Features.push_back("+fp-only-sp");
Features.push_back("-neon");
+ } else if (FPU == "fp5-sp-d16" || FPU == "fpv5-sp-d16") {
+ Features.push_back("+fp-armv8");
+ Features.push_back("+fp-only-sp");
+ Features.push_back("+d16");
+ Features.push_back("-neon");
+ Features.push_back("-crypto");
+ } else if (FPU == "fp5-dp-d16" || FPU == "fpv5-dp-d16" ||
+ FPU == "fp5-d16" || FPU == "fpv5-d16") {
+ Features.push_back("+fp-armv8");
+ Features.push_back("+d16");
+ Features.push_back("-neon");
+ Features.push_back("-crypto");
} else if (FPU == "fp-armv8") {
Features.push_back("+fp-armv8");
Features.push_back("-neon");
@@ -560,6 +589,12 @@ static void getARMFPUFeatures(const Driver &D, const Arg *A,
Features.push_back("+crypto");
} else if (FPU == "neon") {
Features.push_back("+neon");
+ } else if (FPU == "neon-vfpv3") {
+ Features.push_back("+vfp3");
+ Features.push_back("+neon");
+ } else if (FPU == "neon-vfpv4") {
+ Features.push_back("+neon");
+ Features.push_back("+vfp4");
} else if (FPU == "none") {
Features.push_back("-vfp2");
Features.push_back("-vfp3");
@@ -730,6 +765,7 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
// Select the ABI to use.
//
// FIXME: Support -meabi.
+ // FIXME: Parts of this are duplicated in the backend, unify this somehow.
const char *ABIName = nullptr;
if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
ABIName = A->getValue();
@@ -737,8 +773,7 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
// The backend is hardwired to assume AAPCS for M-class processors, ensure
// the frontend matches that.
if (Triple.getEnvironment() == llvm::Triple::EABI ||
- (Triple.getOS() == llvm::Triple::UnknownOS &&
- Triple.getObjectFormat() == llvm::Triple::MachO) ||
+ Triple.getOS() == llvm::Triple::UnknownOS ||
StringRef(CPUName).startswith("cortex-m")) {
ABIName = "aapcs";
} else {
@@ -760,7 +795,11 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
ABIName = "aapcs";
break;
default:
- ABIName = "apcs-gnu";
+ if (Triple.getOS() == llvm::Triple::NetBSD)
+ ABIName = "apcs-gnu";
+ else
+ ABIName = "aapcs";
+ break;
}
}
CmdArgs.push_back("-target-abi");
@@ -801,6 +840,21 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
CmdArgs.push_back("-arm-use-movt=0");
}
+ // -mkernel implies -mstrict-align; don't add the redundant option.
+ if (!KernelOrKext) {
+ if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
+ options::OPT_munaligned_access)) {
+ CmdArgs.push_back("-backend-option");
+ if (A->getOption().matches(options::OPT_mno_unaligned_access))
+ CmdArgs.push_back("-arm-strict-align");
+ else {
+ if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
+ D.Diag(diag::err_target_unsupported_unaligned) << "v6m";
+ CmdArgs.push_back("-arm-no-strict-align");
+ }
+ }
+ }
+
// Setting -mno-global-merge disables the codegen global merge pass. Setting
// -mglobal-merge has no effect as the pass is enabled by default.
if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge,
@@ -875,9 +929,26 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName);
- if (Args.hasArg(options::OPT_mstrict_align)) {
+ if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
+ options::OPT_munaligned_access)) {
+ CmdArgs.push_back("-backend-option");
+ if (A->getOption().matches(options::OPT_mno_unaligned_access))
+ CmdArgs.push_back("-aarch64-strict-align");
+ else
+ CmdArgs.push_back("-aarch64-no-strict-align");
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a53_835769,
+ options::OPT_mno_fix_cortex_a53_835769)) {
+ CmdArgs.push_back("-backend-option");
+ if (A->getOption().matches(options::OPT_mfix_cortex_a53_835769))
+ CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
+ else
+ CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=0");
+ } else if (Triple.getEnvironment() == llvm::Triple::Android) {
+ // Enable the A53 erratum (835769) workaround by default on Android
CmdArgs.push_back("-backend-option");
- CmdArgs.push_back("-aarch64-strict-align");
+ CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
}
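// --- Illustrative sketch, not part of the imported diff ---------------------
// Both new AArch64 blocks follow the same "last flag wins" pattern:
// Args.getLastArg(OPT_a, OPT_b) returns whichever of the two options appeared
// last on the command line (or null), and that decides the -backend-option
// cc1 forwards to LLVM, e.g.
//   -mno-unaligned-access           -> -aarch64-strict-align
//   -mfix-cortex-a53-835769         -> -aarch64-fix-cortex-a53-835769=1
//   (Android target, no flag given) -> -aarch64-fix-cortex-a53-835769=1
// A minimal stand-in for the "last flag wins" part (no LLVM types involved):
#include <string>
#include <vector>

static const std::string *getLastArg(const std::vector<std::string> &Args,
                                     const std::string &A,
                                     const std::string &B) {
  const std::string *Last = nullptr;
  for (const std::string &Arg : Args)
    if (Arg == A || Arg == B)
      Last = &Arg; // later occurrences override earlier ones
  return Last;
}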
// Setting -mno-global-merge disables the codegen global merge pass. Setting
@@ -906,6 +977,10 @@ void mips::getMipsCPUAndABI(const ArgList &Args,
DefMips64CPU = "mips64r6";
}
+ // MIPS3 is the default for mips64*-unknown-openbsd.
+ if (Triple.getOS() == llvm::Triple::OpenBSD)
+ DefMips64CPU = "mips3";
+
if (Arg *A = Args.getLastArg(options::OPT_march_EQ,
options::OPT_mcpu_EQ))
CPUName = A->getValue();
@@ -952,6 +1027,8 @@ void mips::getMipsCPUAndABI(const ArgList &Args,
.Cases("n32", "n64", DefMips64CPU)
.Default("");
}
+
+ // FIXME: Warn on inconsistent use of -march and -mabi.
}
// Convert ABI name to the GNU tools acceptable variant.
@@ -1024,6 +1101,9 @@ static void getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
Features.push_back("-n64");
Features.push_back(Args.MakeArgString(ABIFeature));
+ AddTargetFeature(Args, Features, options::OPT_mno_abicalls,
+ options::OPT_mabicalls, "noabicalls");
+
StringRef FloatABI = getMipsFloatABI(D, Args);
if (FloatABI == "soft") {
// FIXME: Note, this is a hack. We need to pass the selected float
@@ -1228,6 +1308,35 @@ static void getPPCTargetFeatures(const ArgList &Args,
options::OPT_fno_altivec, "altivec");
}
+void Clang::AddPPCTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Select the ABI to use.
+ const char *ABIName = nullptr;
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
+ ABIName = A->getValue();
+ } else if (getToolChain().getTriple().isOSLinux())
+ switch(getToolChain().getArch()) {
+ case llvm::Triple::ppc64:
+ ABIName = "elfv1";
+ break;
+ case llvm::Triple::ppc64le:
+ ABIName = "elfv2";
+ break;
+ default:
+ break;
+ }
+
+ if (ABIName) {
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName);
+ }
+}
+
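// --- Illustrative sketch, not part of the imported diff ---------------------
// On Linux the new AddPPCTargetArgs() picks the ELF ABI from the arch when
// -mabi= is not given: big-endian ppc64 defaults to ELFv1 and ppc64le to
// ELFv2, and the choice reaches cc1 as "-target-abi <name>".  A condensed
// stand-in for that default (the arch is spelled as a plain string here):
#include <string>

static const char *defaultPPCABIOnLinux(const std::string &Arch) {
  if (Arch == "ppc64")
    return "elfv1"; // traditional big-endian 64-bit ELF ABI
  if (Arch == "ppc64le")
    return "elfv2"; // little-endian uses the newer ELFv2 ABI
  return nullptr;   // other PPC variants: leave the backend default in place
}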
+bool ppc::hasPPCAbiArg(const ArgList &Args, const char *Value) {
+ Arg *A = Args.getLastArg(options::OPT_mabi_EQ);
+ return A && (A->getValue() == StringRef(Value));
+}
+
/// Get the (LLVM) name of the R600 gpu we are targeting.
static std::string getR600TargetGPU(const ArgList &Args) {
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
@@ -1246,7 +1355,7 @@ static std::string getR600TargetGPU(const ArgList &Args) {
}
static void getSparcTargetFeatures(const ArgList &Args,
- std::vector<const char *> Features) {
+ std::vector<const char *> &Features) {
bool SoftFloatABI = true;
if (Arg *A =
Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float)) {
@@ -1360,8 +1469,6 @@ static std::string getCPUName(const ArgList &Args, const llvm::Triple &T) {
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- case llvm::Triple::arm64:
- case llvm::Triple::arm64_be:
return getAArch64TargetCPU(Args);
case llvm::Triple::arm:
@@ -1415,6 +1522,7 @@ static std::string getCPUName(const ArgList &Args, const llvm::Triple &T) {
return getSystemZTargetCPU(Args);
case llvm::Triple::r600:
+ case llvm::Triple::amdgcn:
return getR600TargetGPU(Args);
}
}
@@ -1588,7 +1696,7 @@ void Clang::AddHexagonTargetArgs(const ArgList &Args,
}
// Decode AArch64 features from string like +[no]featureA+[no]featureB+...
-static bool DecodeAArch64Features(const Driver &D, const StringRef &text,
+static bool DecodeAArch64Features(const Driver &D, StringRef text,
std::vector<const char *> &Features) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
@@ -1697,6 +1805,9 @@ static void getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
success = getAArch64ArchFeaturesFromMarch(D, A->getValue(), Args, Features);
else if ((A = Args.getLastArg(options::OPT_mcpu_EQ)))
success = getAArch64ArchFeaturesFromMcpu(D, A->getValue(), Args, Features);
+ else if (Args.hasArg(options::OPT_arch))
+ success = getAArch64ArchFeaturesFromMcpu(D, getAArch64TargetCPU(Args), Args,
+ Features);
if (success && (A = Args.getLastArg(options::OPT_mtune_EQ)))
success =
@@ -1704,6 +1815,9 @@ static void getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
else if (success && (A = Args.getLastArg(options::OPT_mcpu_EQ)))
success =
getAArch64MicroArchFeaturesFromMcpu(D, A->getValue(), Args, Features);
+ else if (Args.hasArg(options::OPT_arch))
+ success = getAArch64MicroArchFeaturesFromMcpu(D, getAArch64TargetCPU(Args),
+ Args, Features);
if (!success)
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
@@ -1751,12 +1865,11 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
getPPCTargetFeatures(Args, Features);
break;
case llvm::Triple::sparc:
+ case llvm::Triple::sparcv9:
getSparcTargetFeatures(Args, Features);
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- case llvm::Triple::arm64:
- case llvm::Triple::arm64_be:
getAArch64TargetFeatures(D, Args, Features);
break;
case llvm::Triple::x86:
@@ -1804,43 +1917,15 @@ shouldUseExceptionTablesForObjCExceptions(const ObjCRuntime &runtime,
Triple.getArch() == llvm::Triple::arm));
}
-namespace {
- struct ExceptionSettings {
- bool ExceptionsEnabled;
- bool ShouldUseExceptionTables;
- ExceptionSettings() : ExceptionsEnabled(false),
- ShouldUseExceptionTables(false) {}
- };
-} // end anonymous namespace.
-
// exceptionSettings() exists to share the logic between -cc1 and linker
// invocations.
-static ExceptionSettings exceptionSettings(const ArgList &Args,
- const llvm::Triple &Triple) {
- ExceptionSettings ES;
-
- // Are exceptions enabled by default?
- ES.ExceptionsEnabled = (Triple.getArch() != llvm::Triple::xcore);
-
- // This keeps track of whether exceptions were explicitly turned on or off.
- bool DidHaveExplicitExceptionFlag = false;
-
+static bool exceptionSettings(const ArgList &Args, const llvm::Triple &Triple) {
if (Arg *A = Args.getLastArg(options::OPT_fexceptions,
- options::OPT_fno_exceptions)) {
+ options::OPT_fno_exceptions))
if (A->getOption().matches(options::OPT_fexceptions))
- ES.ExceptionsEnabled = true;
- else
- ES.ExceptionsEnabled = false;
-
- DidHaveExplicitExceptionFlag = true;
- }
-
- // Exception tables and cleanups can be enabled with -fexceptions even if the
- // language itself doesn't support exceptions.
- if (ES.ExceptionsEnabled && DidHaveExplicitExceptionFlag)
- ES.ShouldUseExceptionTables = true;
+ return true;
- return ES;
+ return false;
}
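// --- Illustrative sketch, not part of the imported diff ---------------------
// The ExceptionSettings struct is gone: the helper now answers one question,
// "did -fexceptions win over -fno-exceptions on the command line?".  Exception
// tables are then emitted if that is true, if Obj-C exceptions require them,
// or if C++ exceptions end up enabled.  A minimal model of that boolean:
#include <string>
#include <vector>

static bool exceptionsRequested(const std::vector<std::string> &Args) {
  bool Enabled = false;
  for (const std::string &A : Args) {
    if (A == "-fexceptions")
      Enabled = true;  // later flags override earlier ones
    else if (A == "-fno-exceptions")
      Enabled = false;
  }
  return Enabled;
}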
/// addExceptionArgs - Adds exception related arguments to the driver command
@@ -1865,8 +1950,8 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType,
return;
}
- // Gather the exception settings from the command line arguments.
- ExceptionSettings ES = exceptionSettings(Args, Triple);
+ // Gather the exception settings from the command line arguments.
+ bool EH = exceptionSettings(Args, Triple);
// Obj-C exceptions are enabled by default, regardless of -fexceptions. This
// is not necessarily sensible, but follows GCC.
@@ -1876,31 +1961,27 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType,
true)) {
CmdArgs.push_back("-fobjc-exceptions");
- ES.ShouldUseExceptionTables |=
- shouldUseExceptionTablesForObjCExceptions(objcRuntime, Triple);
+ EH |= shouldUseExceptionTablesForObjCExceptions(objcRuntime, Triple);
}
if (types::isCXX(InputType)) {
- bool CXXExceptionsEnabled = ES.ExceptionsEnabled;
-
+ bool CXXExceptionsEnabled = Triple.getArch() != llvm::Triple::xcore;
if (Arg *A = Args.getLastArg(options::OPT_fcxx_exceptions,
options::OPT_fno_cxx_exceptions,
options::OPT_fexceptions,
- options::OPT_fno_exceptions)) {
- if (A->getOption().matches(options::OPT_fcxx_exceptions))
- CXXExceptionsEnabled = true;
- else if (A->getOption().matches(options::OPT_fno_cxx_exceptions))
- CXXExceptionsEnabled = false;
- }
+ options::OPT_fno_exceptions))
+ CXXExceptionsEnabled =
+ A->getOption().matches(options::OPT_fcxx_exceptions) ||
+ A->getOption().matches(options::OPT_fexceptions);
if (CXXExceptionsEnabled) {
CmdArgs.push_back("-fcxx-exceptions");
- ES.ShouldUseExceptionTables = true;
+ EH = true;
}
}
- if (ES.ShouldUseExceptionTables)
+ if (EH)
CmdArgs.push_back("-fexceptions");
}
@@ -1926,7 +2007,7 @@ static bool ShouldDisableDwarfDirectory(const ArgList &Args,
/// \brief Check whether the given input tree contains any compilation actions.
static bool ContainsCompileAction(const Action *A) {
- if (isa<CompileJobAction>(A))
+ if (isa<CompileJobAction>(A) || isa<BackendJobAction>(A))
return true;
for (const auto &Act : *A)
@@ -1994,8 +2075,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
} else if (Value == "-L") {
CmdArgs.push_back("-msave-temp-labels");
} else if (Value == "--fatal-warnings") {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-fatal-assembler-warnings");
+ CmdArgs.push_back("-massembler-fatal-warnings");
} else if (Value == "--noexecstack") {
CmdArgs.push_back("-mnoexecstack");
} else if (Value == "-compress-debug-sections" ||
@@ -2028,11 +2108,13 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
// Until ARM libraries are built separately, we have them all in one library
static StringRef getArchNameForCompilerRTLib(const ToolChain &TC) {
- if (TC.getArch() == llvm::Triple::arm ||
- TC.getArch() == llvm::Triple::armeb)
+ // FIXME: handle 64-bit
+ if (TC.getTriple().isOSWindows() &&
+ !TC.getTriple().isWindowsItaniumEnvironment())
+ return "i386";
+ if (TC.getArch() == llvm::Triple::arm || TC.getArch() == llvm::Triple::armeb)
return "arm";
- else
- return TC.getArchName();
+ return TC.getArchName();
}
static SmallString<128> getCompilerRTLibDir(const ToolChain &TC) {
@@ -2040,209 +2122,167 @@ static SmallString<128> getCompilerRTLibDir(const ToolChain &TC) {
SmallString<128> Res(TC.getDriver().ResourceDir);
const llvm::Triple &Triple = TC.getTriple();
// TC.getOS() yields "freebsd10.0" whereas "freebsd" is expected.
- StringRef OSLibName = (Triple.getOS() == llvm::Triple::FreeBSD) ?
- "freebsd" : TC.getOS();
+ StringRef OSLibName =
+ (Triple.getOS() == llvm::Triple::FreeBSD) ? "freebsd" : TC.getOS();
llvm::sys::path::append(Res, "lib", OSLibName);
return Res;
}
+static SmallString<128> getCompilerRT(const ToolChain &TC, StringRef Component,
+ bool Shared = false,
+ const char *Env = "") {
+ bool IsOSWindows = TC.getTriple().isOSWindows();
+ StringRef Arch = getArchNameForCompilerRTLib(TC);
+ const char *Prefix = IsOSWindows ? "" : "lib";
+ const char *Suffix =
+ Shared ? (IsOSWindows ? ".dll" : ".so") : (IsOSWindows ? ".lib" : ".a");
+
+ SmallString<128> Path = getCompilerRTLibDir(TC);
+ llvm::sys::path::append(Path, Prefix + Twine("clang_rt.") + Component + "-" +
+ Arch + Env + Suffix);
+
+ return Path;
+}
+
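For readers new to the compiler-rt layout, the new getCompilerRT() helper above encodes the library name as <prefix>clang_rt.<component>-<arch><env><suffix> inside <resource-dir>/lib/<os>. Below is a standalone sketch of that naming scheme using plain std::string; the directory, architecture and environment strings in the usage example are made-up placeholders, not values from this patch.

#include <iostream>
#include <string>

// Standalone sketch of the compiler-rt naming scheme:
//   <libdir>/<prefix>clang_rt.<component>-<arch><env><suffix>
static std::string compilerRTPath(const std::string &LibDir,
                                  const std::string &Component,
                                  const std::string &Arch,
                                  bool IsWindows, bool Shared,
                                  const std::string &Env = "") {
  const std::string Prefix = IsWindows ? "" : "lib";
  const std::string Suffix =
      Shared ? (IsWindows ? ".dll" : ".so") : (IsWindows ? ".lib" : ".a");
  return LibDir + "/" + Prefix + "clang_rt." + Component + "-" + Arch + Env +
         Suffix;
}

int main() {
  // e.g. /res/lib/linux/libclang_rt.builtins-x86_64.a (paths are examples)
  std::cout << compilerRTPath("/res/lib/linux", "builtins", "x86_64",
                              /*IsWindows=*/false, /*Shared=*/false) << "\n";
  // e.g. /res/lib/linux/libclang_rt.asan-arm-android.so
  std::cout << compilerRTPath("/res/lib/linux", "asan", "arm",
                              /*IsWindows=*/false, /*Shared=*/true, "-android")
            << "\n";
}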
// This adds the static libclang_rt.builtins-arch.a directly to the command line
// FIXME: Make sure we can also emit shared objects if they're requested
// and available, check for possible errors, etc.
-static void addClangRTLinux(
- const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs) {
- SmallString<128> LibClangRT = getCompilerRTLibDir(TC);
- llvm::sys::path::append(LibClangRT, Twine("libclang_rt.builtins-") +
- getArchNameForCompilerRTLib(TC) +
- ".a");
-
- CmdArgs.push_back(Args.MakeArgString(LibClangRT));
- CmdArgs.push_back("-lgcc_s");
- if (TC.getDriver().CCCIsCXX())
- CmdArgs.push_back("-lgcc_eh");
+static void addClangRT(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, "builtins")));
+
+ if (!TC.getTriple().isOSWindows()) {
+ // FIXME: why do we link against gcc when we are using compiler-rt?
+ CmdArgs.push_back("-lgcc_s");
+ if (TC.getDriver().CCCIsCXX())
+ CmdArgs.push_back("-lgcc_eh");
+ }
}
-static void addProfileRT(
- const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs) {
- if (!(Args.hasArg(options::OPT_fprofile_arcs) ||
+static void addProfileRT(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if (!(Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false) ||
Args.hasArg(options::OPT_fprofile_generate) ||
Args.hasArg(options::OPT_fprofile_instr_generate) ||
Args.hasArg(options::OPT_fcreate_profile) ||
Args.hasArg(options::OPT_coverage)))
return;
- // -fprofile-instr-generate requires position-independent code to build with
- // shared objects. Link against the right archive.
- const char *Lib = "libclang_rt.profile-";
- if (Args.hasArg(options::OPT_fprofile_instr_generate) &&
- Args.hasArg(options::OPT_shared))
- Lib = "libclang_rt.profile-pic-";
-
- SmallString<128> LibProfile = getCompilerRTLibDir(TC);
- llvm::sys::path::append(LibProfile,
- Twine(Lib) + getArchNameForCompilerRTLib(TC) + ".a");
-
- CmdArgs.push_back(Args.MakeArgString(LibProfile));
+ CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, "profile")));
}
-static SmallString<128> getSanitizerRTLibName(const ToolChain &TC,
- const StringRef Sanitizer,
- bool Shared) {
- // Sanitizer runtime has name "libclang_rt.<Sanitizer>-<ArchName>.{a,so}"
- // (or "libclang_rt.<Sanitizer>-<ArchName>-android.so for Android)
- const char *EnvSuffix =
- TC.getTriple().getEnvironment() == llvm::Triple::Android ? "-android" : "";
- SmallString<128> LibSanitizer = getCompilerRTLibDir(TC);
- llvm::sys::path::append(LibSanitizer,
- Twine("libclang_rt.") + Sanitizer + "-" +
- getArchNameForCompilerRTLib(TC) + EnvSuffix +
- (Shared ? ".so" : ".a"));
- return LibSanitizer;
+static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs, StringRef Sanitizer,
+ bool IsShared) {
+ const char *Env = TC.getTriple().getEnvironment() == llvm::Triple::Android
+ ? "-android"
+ : "";
+
+ // Static runtimes must be forced into the executable, so we wrap them in
+ // whole-archive.
+ if (!IsShared)
+ CmdArgs.push_back("-whole-archive");
+ CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, Sanitizer, IsShared,
+ Env)));
+ if (!IsShared)
+ CmdArgs.push_back("-no-whole-archive");
}
-static void addSanitizerRTLinkFlags(const ToolChain &TC, const ArgList &Args,
+// Tries to use a file with the list of dynamic symbols that need to be exported
+// from the runtime library. Returns true if the file was found.
+static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs,
- const StringRef Sanitizer,
- bool BeforeLibStdCXX,
- bool ExportSymbols = true,
- bool LinkDeps = true) {
- SmallString<128> LibSanitizer =
- getSanitizerRTLibName(TC, Sanitizer, /*Shared*/ false);
-
- // Sanitizer runtime may need to come before -lstdc++ (or -lc++, libstdc++.a,
- // etc.) so that the linker picks custom versions of the global 'operator
- // new' and 'operator delete' symbols. We take the extreme (but simple)
- // strategy of inserting it at the front of the link command. It also
- // needs to be forced to end up in the executable, so wrap it in
- // whole-archive.
- SmallVector<const char *, 3> LibSanitizerArgs;
- LibSanitizerArgs.push_back("-whole-archive");
- LibSanitizerArgs.push_back(Args.MakeArgString(LibSanitizer));
- LibSanitizerArgs.push_back("-no-whole-archive");
-
- CmdArgs.insert(BeforeLibStdCXX ? CmdArgs.begin() : CmdArgs.end(),
- LibSanitizerArgs.begin(), LibSanitizerArgs.end());
-
- if (LinkDeps) {
- // Link sanitizer dependencies explicitly
- CmdArgs.push_back("-lpthread");
- CmdArgs.push_back("-lrt");
- CmdArgs.push_back("-lm");
- // There's no libdl on FreeBSD.
- if (TC.getTriple().getOS() != llvm::Triple::FreeBSD)
- CmdArgs.push_back("-ldl");
+ StringRef Sanitizer) {
+ SmallString<128> SanRT = getCompilerRT(TC, Sanitizer);
+ if (llvm::sys::fs::exists(SanRT + ".syms")) {
+ CmdArgs.push_back(Args.MakeArgString("--dynamic-list=" + SanRT + ".syms"));
+ return true;
}
+ return false;
+}
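addSanitizerDynamicList() above simply probes for a sibling <runtime>.syms file and, when it exists, restricts the exported dynamic symbols to that list; the caller falls back to -export-dynamic otherwise. A standalone equivalent using std::filesystem (clang itself uses llvm::sys::fs::exists) might look like this:

#include <filesystem>
#include <optional>
#include <string>

// Sketch: if "<runtime>.syms" exists next to the static runtime, return
// the --dynamic-list argument; otherwise return nothing so the caller
// can fall back to -export-dynamic.
static std::optional<std::string>
dynamicListArg(const std::filesystem::path &RuntimeArchive) {
  std::filesystem::path Syms = RuntimeArchive;
  Syms += ".syms";  // e.g. libclang_rt.asan-x86_64.a.syms
  if (std::filesystem::exists(Syms))
    return "--dynamic-list=" + Syms.string();
  return std::nullopt;
}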
- // If possible, use a dynamic symbols file to export the symbols from the
- // runtime library. If we can't do so, use -export-dynamic instead to export
- // all symbols from the binary.
- if (ExportSymbols) {
- if (llvm::sys::fs::exists(LibSanitizer + ".syms"))
- CmdArgs.push_back(
- Args.MakeArgString("--dynamic-list=" + LibSanitizer + ".syms"));
- else
- CmdArgs.push_back("-export-dynamic");
- }
+static void linkSanitizerRuntimeDeps(const ToolChain &TC,
+ ArgStringList &CmdArgs) {
+ // Force linking against the system libraries sanitizers depend on
+ // (see PR15823 for why this is necessary).
+ CmdArgs.push_back("--no-as-needed");
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-lrt");
+ CmdArgs.push_back("-lm");
+ // There's no libdl on FreeBSD.
+ if (TC.getTriple().getOS() != llvm::Triple::FreeBSD)
+ CmdArgs.push_back("-ldl");
}
-/// If AddressSanitizer is enabled, add appropriate linker flags (Linux).
-/// This needs to be called before we add the C run-time (malloc, etc).
-static void addAsanRT(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs, bool Shared, bool IsCXX) {
- if (Shared) {
- // Link dynamic runtime if necessary.
- SmallString<128> LibSanitizer =
- getSanitizerRTLibName(TC, "asan", Shared);
- CmdArgs.insert(CmdArgs.begin(), Args.MakeArgString(LibSanitizer));
+static void
+collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
+ SmallVectorImpl<StringRef> &SharedRuntimes,
+ SmallVectorImpl<StringRef> &StaticRuntimes,
+ SmallVectorImpl<StringRef> &HelperStaticRuntimes) {
+ const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
+ // Collect shared runtimes.
+ if (SanArgs.needsAsanRt() && SanArgs.needsSharedAsanRt()) {
+ SharedRuntimes.push_back("asan");
}
- // Do not link static runtime to DSOs or if compiling for Android.
+ // Collect static runtimes.
if (Args.hasArg(options::OPT_shared) ||
- (TC.getTriple().getEnvironment() == llvm::Triple::Android))
+ (TC.getTriple().getEnvironment() == llvm::Triple::Android)) {
+ // Don't link static runtimes into DSOs or if compiling for Android.
return;
-
- if (Shared) {
- addSanitizerRTLinkFlags(TC, Args, CmdArgs, "asan-preinit",
- /*BeforeLibStdCXX*/ true, /*ExportSymbols*/ false,
- /*LinkDeps*/ false);
- } else {
- addSanitizerRTLinkFlags(TC, Args, CmdArgs, "asan", true);
- if (IsCXX)
- addSanitizerRTLinkFlags(TC, Args, CmdArgs, "asan_cxx", true);
}
-}
-
-/// If ThreadSanitizer is enabled, add appropriate linker flags (Linux).
-/// This needs to be called before we add the C run-time (malloc, etc).
-static void addTsanRT(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
- if (!Args.hasArg(options::OPT_shared))
- addSanitizerRTLinkFlags(TC, Args, CmdArgs, "tsan", true);
-}
-
-/// If MemorySanitizer is enabled, add appropriate linker flags (Linux).
-/// This needs to be called before we add the C run-time (malloc, etc).
-static void addMsanRT(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
- if (!Args.hasArg(options::OPT_shared))
- addSanitizerRTLinkFlags(TC, Args, CmdArgs, "msan", true);
-}
-
-/// If LeakSanitizer is enabled, add appropriate linker flags (Linux).
-/// This needs to be called before we add the C run-time (malloc, etc).
-static void addLsanRT(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
- if (!Args.hasArg(options::OPT_shared))
- addSanitizerRTLinkFlags(TC, Args, CmdArgs, "lsan", true);
-}
-
-/// If UndefinedBehaviorSanitizer is enabled, add appropriate linker flags
-/// (Linux).
-static void addUbsanRT(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs, bool IsCXX,
- bool HasOtherSanitizerRt) {
- // Do not link runtime into shared libraries.
- if (Args.hasArg(options::OPT_shared))
- return;
-
- // Need a copy of sanitizer_common. This could come from another sanitizer
- // runtime; if we're not including one, include our own copy.
- if (!HasOtherSanitizerRt)
- addSanitizerRTLinkFlags(TC, Args, CmdArgs, "san", true, false);
-
- addSanitizerRTLinkFlags(TC, Args, CmdArgs, "ubsan", false, true);
-
- // Only include the bits of the runtime which need a C++ ABI library if
- // we're linking in C++ mode.
- if (IsCXX)
- addSanitizerRTLinkFlags(TC, Args, CmdArgs, "ubsan_cxx", false, true);
-}
-
-static void addDfsanRT(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
- if (!Args.hasArg(options::OPT_shared))
- addSanitizerRTLinkFlags(TC, Args, CmdArgs, "dfsan", true);
-}
-
-// Should be called before we add C++ ABI library.
-static void addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
+ if (SanArgs.needsAsanRt()) {
+ if (SanArgs.needsSharedAsanRt()) {
+ HelperStaticRuntimes.push_back("asan-preinit");
+ } else {
+ StaticRuntimes.push_back("asan");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("asan_cxx");
+ }
+ }
+ if (SanArgs.needsDfsanRt())
+ StaticRuntimes.push_back("dfsan");
+ if (SanArgs.needsLsanRt())
+ StaticRuntimes.push_back("lsan");
+ if (SanArgs.needsMsanRt())
+ StaticRuntimes.push_back("msan");
+ if (SanArgs.needsTsanRt())
+ StaticRuntimes.push_back("tsan");
+ // WARNING: UBSan should always go last.
+ if (SanArgs.needsUbsanRt()) {
+ // If UBSan is not combined with another sanitizer, we need to pull in
+ // sanitizer_common explicitly.
+ if (StaticRuntimes.empty())
+ HelperStaticRuntimes.push_back("san");
+ StaticRuntimes.push_back("ubsan");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("ubsan_cxx");
+ }
+}
+
+// Should be called before we add system libraries (C++ ABI, libstdc++/libc++,
+// C runtime, etc). Returns true if sanitizer system deps need to be linked in.
+static bool addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
- const SanitizerArgs &Sanitize = TC.getSanitizerArgs();
- const Driver &D = TC.getDriver();
- if (Sanitize.needsUbsanRt())
- addUbsanRT(TC, Args, CmdArgs, D.CCCIsCXX(),
- Sanitize.needsAsanRt() || Sanitize.needsTsanRt() ||
- Sanitize.needsMsanRt() || Sanitize.needsLsanRt());
- if (Sanitize.needsAsanRt())
- addAsanRT(TC, Args, CmdArgs, Sanitize.needsSharedAsanRt(), D.CCCIsCXX());
- if (Sanitize.needsTsanRt())
- addTsanRT(TC, Args, CmdArgs);
- if (Sanitize.needsMsanRt())
- addMsanRT(TC, Args, CmdArgs);
- if (Sanitize.needsLsanRt())
- addLsanRT(TC, Args, CmdArgs);
- if (Sanitize.needsDfsanRt())
- addDfsanRT(TC, Args, CmdArgs);
+ SmallVector<StringRef, 4> SharedRuntimes, StaticRuntimes,
+ HelperStaticRuntimes;
+ collectSanitizerRuntimes(TC, Args, SharedRuntimes, StaticRuntimes,
+ HelperStaticRuntimes);
+ for (auto RT : SharedRuntimes)
+ addSanitizerRuntime(TC, Args, CmdArgs, RT, true);
+ for (auto RT : HelperStaticRuntimes)
+ addSanitizerRuntime(TC, Args, CmdArgs, RT, false);
+ bool AddExportDynamic = false;
+ for (auto RT : StaticRuntimes) {
+ addSanitizerRuntime(TC, Args, CmdArgs, RT, false);
+ AddExportDynamic |= !addSanitizerDynamicList(TC, Args, CmdArgs, RT);
+ }
+ // If there is a static runtime with no dynamic list, force all the symbols
+ // to be dynamic to be sure we export sanitizer interface functions.
+ if (AddExportDynamic)
+ CmdArgs.push_back("-export-dynamic");
+ return !StaticRuntimes.empty();
}
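To make the new flow concrete: for a hypothetical C++ link with static ASan plus UBSan, and assuming a .syms file exists next to each runtime, addSanitizerRuntimes() would emit roughly the argument sequence built below, and its true return value tells the caller to add -lpthread, -lrt, -lm and (outside FreeBSD) -ldl later via linkSanitizerRuntimeDeps(). The resource directory and x86_64 architecture are illustrative placeholders only.

#include <iostream>
#include <string>
#include <vector>

// Illustrative only: approximate linker arguments for a static
// ASan + UBSan C++ link when every runtime has a .syms file.
int main() {
  const std::string Dir = "/res/lib/linux/";  // made-up resource dir
  std::vector<std::string> CmdArgs = {
      "-whole-archive", Dir + "libclang_rt.asan-x86_64.a", "-no-whole-archive",
      "--dynamic-list=" + Dir + "libclang_rt.asan-x86_64.a.syms",
      "-whole-archive", Dir + "libclang_rt.asan_cxx-x86_64.a", "-no-whole-archive",
      "--dynamic-list=" + Dir + "libclang_rt.asan_cxx-x86_64.a.syms",
      "-whole-archive", Dir + "libclang_rt.ubsan-x86_64.a", "-no-whole-archive",
      "--dynamic-list=" + Dir + "libclang_rt.ubsan-x86_64.a.syms",
      "-whole-archive", Dir + "libclang_rt.ubsan_cxx-x86_64.a", "-no-whole-archive",
      "--dynamic-list=" + Dir + "libclang_rt.ubsan_cxx-x86_64.a.syms"};
  for (const std::string &A : CmdArgs)
    std::cout << A << "\n";
}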
static bool shouldUseFramePointerForTarget(const ArgList &Args,
@@ -2332,10 +2372,10 @@ static void SplitDebugInfo(const ToolChain &TC, Compilation &C,
Args.MakeArgString(TC.GetProgramPath("objcopy"));
// First extract the dwo sections.
- C.addCommand(new Command(JA, T, Exec, ExtractArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, T, Exec, ExtractArgs));
// Then remove them from the original .o file.
- C.addCommand(new Command(JA, T, Exec, StripArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, T, Exec, StripArgs));
}
/// \brief Vectorize at all optimization levels greater than 1 except for -Oz.
@@ -2405,6 +2445,16 @@ static std::string getMSCompatibilityVersion(const char *VersionStr) {
llvm::utostr_32(Build);
}
+// Claim options we don't want to warn about if they are unused. We do this
+// for options that build systems might add but that are unused when, for
+// example, assembling or only running the preprocessor.
+static void claimNoWarnArgs(const ArgList &Args) {
+ // Don't warn about unused -f(no-)?lto. This can happen when we're
+ // preprocessing, precompiling or assembling.
+ Args.ClaimAllArgs(options::OPT_flto);
+ Args.ClaimAllArgs(options::OPT_fno_lto);
+}
+
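The claiming mechanism referenced here marks an option as consumed so the driver's "argument unused during compilation" warning stays quiet; claimNoWarnArgs() applies it to -flto/-fno-lto for jobs that legitimately ignore them. The sketch below is a standalone illustration of the idea only, not the llvm::opt interface, and the -fexample-unused flag in it is made up.

#include <iostream>
#include <string>
#include <vector>

// Sketch of "claiming": a job marks the flags it consumed (or knowingly
// ignores); only unclaimed flags are reported as unused afterwards.
struct Flag {
  std::string Spelling;
  bool Claimed;
};

static void claimAll(std::vector<Flag> &Flags, const std::string &Spelling) {
  for (Flag &F : Flags)
    if (F.Spelling == Spelling)
      F.Claimed = true;
}

int main() {
  std::vector<Flag> Flags = {{"-flto", false}, {"-fexample-unused", false}};
  claimAll(Flags, "-flto");  // an assemble-only job ignores -flto silently
  for (const Flag &F : Flags)
    if (!F.Claimed)
      std::cout << "warning: argument unused: " << F.Spelling << "\n";
}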
void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -2485,7 +2535,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
} else if (isa<VerifyPCHJobAction>(JA)) {
CmdArgs.push_back("-verify-pch");
} else {
- assert(isa<CompileJobAction>(JA) && "Invalid action for clang tool.");
+ assert((isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) &&
+ "Invalid action for clang tool.");
if (JA.getType() == types::TY_Nothing) {
CmdArgs.push_back("-fsyntax-only");
@@ -2599,7 +2650,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
case llvm::Triple::aarch64:
- case llvm::Triple::arm64:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
@@ -2679,9 +2729,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Note that these flags are trump-cards. Regardless of the order w.r.t. the
// PIC or PIE options above, if these show up, PIC is disabled.
llvm::Triple Triple(TripleStr);
- if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6) ||
- Triple.getArch() == llvm::Triple::arm64 ||
- Triple.getArch() == llvm::Triple::aarch64))
+ if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6)))
PIC = PIE = false;
if (Args.hasArg(options::OPT_static))
PIC = PIE = false;
@@ -2721,12 +2769,31 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ CmdArgs.push_back("-mthread-model");
+ if (Arg *A = Args.getLastArg(options::OPT_mthread_model))
+ CmdArgs.push_back(A->getValue());
+ else
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().getThreadModel()));
+
if (!Args.hasFlag(options::OPT_fmerge_all_constants,
options::OPT_fno_merge_all_constants))
CmdArgs.push_back("-fno-merge-all-constants");
// LLVM Code Generator Options.
+ if (Args.hasArg(options::OPT_frewrite_map_file) ||
+ Args.hasArg(options::OPT_frewrite_map_file_EQ)) {
+ for (arg_iterator
+ MFI = Args.filtered_begin(options::OPT_frewrite_map_file,
+ options::OPT_frewrite_map_file_EQ),
+ MFE = Args.filtered_end();
+ MFI != MFE; ++MFI) {
+ CmdArgs.push_back("-frewrite-map-file");
+ CmdArgs.push_back((*MFI)->getValue());
+ (*MFI)->claim();
+ }
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_Wframe_larger_than_EQ)) {
StringRef v = A->getValue();
CmdArgs.push_back("-mllvm");
@@ -3017,8 +3084,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- case llvm::Triple::arm64:
- case llvm::Triple::arm64_be:
AddAArch64TargetArgs(Args, CmdArgs);
break;
@@ -3029,6 +3094,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
AddMIPSTargetArgs(Args, CmdArgs);
break;
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ AddPPCTargetArgs(Args, CmdArgs);
+ break;
+
case llvm::Triple::sparc:
case llvm::Triple::sparcv9:
AddSparcTargetArgs(Args, CmdArgs);
@@ -3092,14 +3163,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// are preserved, all other debug options are substituted with "-g".
Args.ClaimAllArgs(options::OPT_g_Group);
if (Arg *A = Args.getLastArg(options::OPT_g_Group)) {
- if (A->getOption().matches(options::OPT_gline_tables_only)) {
+ if (A->getOption().matches(options::OPT_gline_tables_only) ||
+ A->getOption().matches(options::OPT_g1)) {
// FIXME: we should support specifying dwarf version with
// -gline-tables-only.
CmdArgs.push_back("-gline-tables-only");
- // Default is dwarf-2 for Darwin, OpenBSD and FreeBSD.
+ // Default is dwarf-2 for Darwin, OpenBSD, FreeBSD and Solaris.
const llvm::Triple &Triple = getToolChain().getTriple();
if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::OpenBSD ||
- Triple.getOS() == llvm::Triple::FreeBSD)
+ Triple.getOS() == llvm::Triple::FreeBSD ||
+ Triple.getOS() == llvm::Triple::Solaris)
CmdArgs.push_back("-gdwarf-2");
} else if (A->getOption().matches(options::OPT_gdwarf_2))
CmdArgs.push_back("-gdwarf-2");
@@ -3109,10 +3182,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-gdwarf-4");
else if (!A->getOption().matches(options::OPT_g0) &&
!A->getOption().matches(options::OPT_ggdb0)) {
- // Default is dwarf-2 for Darwin, OpenBSD and FreeBSD.
+ // Default is dwarf-2 for Darwin, OpenBSD, FreeBSD and Solaris.
const llvm::Triple &Triple = getToolChain().getTriple();
if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::OpenBSD ||
- Triple.getOS() == llvm::Triple::FreeBSD)
+ Triple.getOS() == llvm::Triple::FreeBSD ||
+ Triple.getOS() == llvm::Triple::Solaris)
CmdArgs.push_back("-gdwarf-2");
else
CmdArgs.push_back("-g");
@@ -3183,15 +3257,29 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_ftest_coverage) ||
Args.hasArg(options::OPT_coverage))
CmdArgs.push_back("-femit-coverage-notes");
- if (Args.hasArg(options::OPT_fprofile_arcs) ||
+ if (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false) ||
Args.hasArg(options::OPT_coverage))
CmdArgs.push_back("-femit-coverage-data");
+ if (Args.hasArg(options::OPT_fcoverage_mapping) &&
+ !Args.hasArg(options::OPT_fprofile_instr_generate))
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << "-fcoverage-mapping" << "-fprofile-instr-generate";
+
+ if (Args.hasArg(options::OPT_fcoverage_mapping))
+ CmdArgs.push_back("-fcoverage-mapping");
+
if (C.getArgs().hasArg(options::OPT_c) ||
C.getArgs().hasArg(options::OPT_S)) {
if (Output.isFilename()) {
CmdArgs.push_back("-coverage-file");
- SmallString<128> CoverageFilename(Output.getFilename());
+ SmallString<128> CoverageFilename;
+ if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o)) {
+ CoverageFilename = FinalOutput->getValue();
+ } else {
+ CoverageFilename = llvm::sys::path::filename(Output.getBaseInput());
+ }
if (llvm::sys::path::is_relative(CoverageFilename.str())) {
SmallString<128> Pwd;
if (!llvm::sys::fs::current_path(Pwd)) {
@@ -3319,9 +3407,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
D.Diag(diag::warn_ignored_gcc_optimization) << (*it)->getAsString(Args);
}
- // Don't warn about unused -flto. This can happen when we're preprocessing or
- // precompiling.
- Args.ClaimAllArgs(options::OPT_flto);
+ claimNoWarnArgs(Args);
Args.AddAllArgs(CmdArgs, options::OPT_R_Group);
Args.AddAllArgs(CmdArgs, options::OPT_W_Group);
@@ -3344,8 +3430,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
else
Std->render(Args, CmdArgs);
+ // If -f(no-)trigraphs appears after the language standard flag, honor it.
if (Arg *A = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi,
- options::OPT_trigraphs))
+ options::OPT_ftrigraphs,
+ options::OPT_fno_trigraphs))
if (A != Std)
A->render(Args, CmdArgs);
} else {
@@ -3361,7 +3449,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
else if (IsWindowsMSVC)
CmdArgs.push_back("-std=c++11");
- Args.AddLastArg(CmdArgs, options::OPT_trigraphs);
+ Args.AddLastArg(CmdArgs, options::OPT_ftrigraphs,
+ options::OPT_fno_trigraphs);
}
// GCC's behavior for -Wwrite-strings is a bit strange:
@@ -3480,6 +3569,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue());
}
+ if (Arg *A = Args.getLastArg(options::OPT_fspell_checking_limit_EQ)) {
+ CmdArgs.push_back("-fspell-checking-limit");
+ CmdArgs.push_back(A->getValue());
+ }
+
// Pass -fmessage-length=.
CmdArgs.push_back("-fmessage-length");
if (Arg *A = Args.getLastArg(options::OPT_fmessage_length_EQ)) {
@@ -3531,15 +3625,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const SanitizerArgs &Sanitize = getToolChain().getSanitizerArgs();
Sanitize.addArgs(Args, CmdArgs);
- if (!Args.hasFlag(options::OPT_fsanitize_recover,
- options::OPT_fno_sanitize_recover,
- true))
- CmdArgs.push_back("-fno-sanitize-recover");
-
- if (Args.hasFlag(options::OPT_fsanitize_undefined_trap_on_error,
- options::OPT_fno_sanitize_undefined_trap_on_error, false))
- CmdArgs.push_back("-fsanitize-undefined-trap-on-error");
-
// Report an error for -faltivec on anything other than PowerPC.
if (const Arg *A = Args.getLastArg(options::OPT_faltivec))
if (!(getToolChain().getArch() == llvm::Triple::ppc ||
@@ -3647,31 +3732,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
StringRef alignment = Args.getLastArgValue(options::OPT_mstack_alignment);
CmdArgs.push_back(Args.MakeArgString("-mstack-alignment=" + alignment));
}
- // -mkernel implies -mstrict-align; don't add the redundant option.
- if (!KernelOrKext) {
- if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
- options::OPT_munaligned_access)) {
- if (A->getOption().matches(options::OPT_mno_unaligned_access)) {
- CmdArgs.push_back("-backend-option");
- if (getToolChain().getTriple().getArch() == llvm::Triple::aarch64 ||
- getToolChain().getTriple().getArch() == llvm::Triple::aarch64_be ||
- getToolChain().getTriple().getArch() == llvm::Triple::arm64 ||
- getToolChain().getTriple().getArch() == llvm::Triple::arm64_be)
- CmdArgs.push_back("-aarch64-strict-align");
- else
- CmdArgs.push_back("-arm-strict-align");
- } else {
- CmdArgs.push_back("-backend-option");
- if (getToolChain().getTriple().getArch() == llvm::Triple::aarch64 ||
- getToolChain().getTriple().getArch() == llvm::Triple::aarch64_be ||
- getToolChain().getTriple().getArch() == llvm::Triple::arm64 ||
- getToolChain().getTriple().getArch() == llvm::Triple::arm64_be)
- CmdArgs.push_back("-aarch64-no-strict-align");
- else
- CmdArgs.push_back("-arm-no-strict-align");
- }
- }
- }
+
+ if (getToolChain().getTriple().getArch() == llvm::Triple::aarch64 ||
+ getToolChain().getTriple().getArch() == llvm::Triple::aarch64_be)
+ CmdArgs.push_back("-fallow-half-arguments-and-returns");
if (Arg *A = Args.getLastArg(options::OPT_mrestrict_it,
options::OPT_mno_restrict_it)) {
@@ -3738,14 +3802,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fblocks-runtime-optional");
}
- // -fmodules enables modules (off by default). However, for C++/Objective-C++,
- // users must also pass -fcxx-modules. The latter flag will disappear once the
- // modules implementation is solid for C++/Objective-C++ programs as well.
+ // -fmodules enables modules (off by default).
+ // Users can pass -fno-cxx-modules to turn off modules support for
+ // C++/Objective-C++ programs, which is a little less mature.
bool HaveModules = false;
if (Args.hasFlag(options::OPT_fmodules, options::OPT_fno_modules, false)) {
bool AllowedInCXX = Args.hasFlag(options::OPT_fcxx_modules,
options::OPT_fno_cxx_modules,
- false);
+ true);
if (AllowedInCXX || !types::isCXX(InputType)) {
CmdArgs.push_back("-fmodules");
HaveModules = true;
@@ -3777,15 +3841,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fmodule-name specifies the module that is currently being built (or
// used for header checking by -fmodule-maps).
- if (Arg *A = Args.getLastArg(options::OPT_fmodule_name))
- A->render(Args, CmdArgs);
+ Args.AddLastArg(CmdArgs, options::OPT_fmodule_name);
- // -fmodule-map-file can be used to specify a file containing module
+ // -fmodule-map-file can be used to specify files containing module
// definitions.
- if (Arg *A = Args.getLastArg(options::OPT_fmodule_map_file))
- A->render(Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs, options::OPT_fmodule_map_file);
+
+ // -fmodule-file can be used to specify files containing precompiled modules.
+ Args.AddAllArgs(CmdArgs, options::OPT_fmodule_file);
- // -fmodule-cache-path specifies where our module files should be written.
+ // -fmodule-cache-path specifies where our implicitly-built module files
+ // should be written.
SmallString<128> ModuleCachePath;
if (Arg *A = Args.getLastArg(options::OPT_fmodules_cache_path))
ModuleCachePath = A->getValue();
@@ -3813,14 +3879,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (HaveModules && C.isForDiagnostics()) {
SmallString<128> VFSDir(Output.getFilename());
llvm::sys::path::replace_extension(VFSDir, ".cache");
+ // Add the cache directory as a temp so the crash diagnostics pick it up.
+ C.addTempFile(Args.MakeArgString(VFSDir));
+
llvm::sys::path::append(VFSDir, "vfs");
CmdArgs.push_back("-module-dependency-dir");
CmdArgs.push_back(Args.MakeArgString(VFSDir));
}
- if (Arg *A = Args.getLastArg(options::OPT_fmodules_user_build_path))
- if (HaveModules)
- A->render(Args, CmdArgs);
+ if (HaveModules)
+ Args.AddLastArg(CmdArgs, options::OPT_fmodules_user_build_path);
// Pass through all -fmodules-ignore-macro arguments.
Args.AddAllArgs(CmdArgs, options::OPT_fmodules_ignore_macro);
@@ -3829,8 +3897,23 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fbuild_session_timestamp);
+ if (Arg *A = Args.getLastArg(options::OPT_fbuild_session_file)) {
+ if (Args.hasArg(options::OPT_fbuild_session_timestamp))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-fbuild-session-timestamp";
+
+ llvm::sys::fs::file_status Status;
+ if (llvm::sys::fs::status(A->getValue(), Status))
+ D.Diag(diag::err_drv_no_such_file) << A->getValue();
+ char TimeStamp[48];
+ snprintf(TimeStamp, sizeof(TimeStamp), "-fbuild-session-timestamp=%" PRIu64,
+ (uint64_t)Status.getLastModificationTime().toEpochTime());
+ CmdArgs.push_back(Args.MakeArgString(TimeStamp));
+ }
+
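The -fbuild-session-file handling above converts the named file's modification time into an equivalent -fbuild-session-timestamp=<seconds> argument. A standalone sketch of the same conversion using std::filesystem follows; note that clang uses llvm::sys::fs::status, and that before C++20's clock_cast the epoch of file_time_type is implementation-defined, so treat the numeric value as illustrative.

#include <chrono>
#include <filesystem>
#include <string>

// Sketch: derive "-fbuild-session-timestamp=<seconds>" from a file's
// last modification time, mirroring the -fbuild-session-file logic above.
static std::string buildSessionTimestampArg(const std::filesystem::path &File) {
  auto MTime = std::filesystem::last_write_time(File);
  auto Secs = std::chrono::duration_cast<std::chrono::seconds>(
                  MTime.time_since_epoch())
                  .count();
  return "-fbuild-session-timestamp=" + std::to_string(Secs);
}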
if (Args.getLastArg(options::OPT_fmodules_validate_once_per_build_session)) {
- if (!Args.getLastArg(options::OPT_fbuild_session_timestamp))
+ if (!Args.getLastArg(options::OPT_fbuild_session_timestamp,
+ options::OPT_fbuild_session_file))
D.Diag(diag::err_drv_modules_validate_once_requires_timestamp);
Args.AddLastArg(CmdArgs,
@@ -4085,6 +4168,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fpack-struct=1");
}
+ // Handle -fmax-type-align=N and -fno-max-type-align
+ bool SkipMaxTypeAlign = Args.hasArg(options::OPT_fno_max_type_align);
+ if (Arg *A = Args.getLastArg(options::OPT_fmax_type_align_EQ)) {
+ if (!SkipMaxTypeAlign) {
+ std::string MaxTypeAlignStr = "-fmax-type-align=";
+ MaxTypeAlignStr += A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(MaxTypeAlignStr));
+ }
+ } else if (getToolChain().getTriple().isOSDarwin()) {
+ if (!SkipMaxTypeAlign) {
+ std::string MaxTypeAlignStr = "-fmax-type-align=16";
+ CmdArgs.push_back(Args.MakeArgString(MaxTypeAlignStr));
+ }
+ }
+
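The -fmax-type-align block above has three outcomes: an explicit -fmax-type-align=N is forwarded, -fno-max-type-align suppresses the flag entirely, and otherwise only Darwin gets a default of 16. A compact standalone sketch of that decision, where std::optional stands in for "no flag emitted":

#include <optional>
#include <string>

// Sketch of the -fmax-type-align forwarding logic above.
// ExplicitValue holds the value of the last -fmax-type-align=N, if any.
static std::optional<std::string>
maxTypeAlignArg(bool NoMaxTypeAlign, std::optional<std::string> ExplicitValue,
                bool IsDarwin) {
  if (NoMaxTypeAlign)
    return std::nullopt;                        // -fno-max-type-align wins
  if (ExplicitValue)
    return "-fmax-type-align=" + *ExplicitValue;
  if (IsDarwin)
    return std::string("-fmax-type-align=16");  // Darwin-only default
  return std::nullopt;
}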
if (KernelOrKext || isNoCommonDefault(getToolChain().getTriple())) {
if (!Args.hasArg(options::OPT_fcommon))
CmdArgs.push_back("-fno-common");
@@ -4116,6 +4214,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
D.Diag(diag::err_drv_invalid_value) << inputCharset->getAsString(Args) << value;
}
+ // -fexec-charset=UTF-8 is the default. Reject anything else.
+ if (Arg *execCharset = Args.getLastArg(
+ options::OPT_fexec_charset_EQ)) {
+ StringRef value = execCharset->getValue();
+ if (value != "UTF-8")
+ D.Diag(diag::err_drv_invalid_value) << execCharset->getAsString(Args) << value;
+ }
+
// -fcaret-diagnostics is default.
if (!Args.hasFlag(options::OPT_fcaret_diagnostics,
options::OPT_fno_caret_diagnostics, true))
@@ -4321,18 +4427,27 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Forward -Xclang arguments to -cc1, and -mllvm arguments to the LLVM option
// parser.
Args.AddAllArgValues(CmdArgs, options::OPT_Xclang);
+ bool OptDisabled = false;
for (arg_iterator it = Args.filtered_begin(options::OPT_mllvm),
ie = Args.filtered_end(); it != ie; ++it) {
(*it)->claim();
// We translate this by hand to the -cc1 argument, since nightly test uses
// it and developers have been trained to spell it with -mllvm.
- if (StringRef((*it)->getValue(0)) == "-disable-llvm-optzns")
+ if (StringRef((*it)->getValue(0)) == "-disable-llvm-optzns") {
CmdArgs.push_back("-disable-llvm-optzns");
- else
+ OptDisabled = true;
+ } else
(*it)->render(Args, CmdArgs);
}
+ // With -save-temps, we want to save the unoptimized bitcode output from the
+ // CompileJobAction, so disable optimizations if they are not already
+ // disabled.
+ if (Args.hasArg(options::OPT_save_temps) && !OptDisabled &&
+ isa<CompileJobAction>(JA))
+ CmdArgs.push_back("-disable-llvm-optzns");
+
if (Output.getType() == types::TY_Dependencies) {
// Handled with other dependency code.
} else if (Output.isFilename()) {
@@ -4365,8 +4480,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
SmallString<256> Flags;
Flags += Exec;
for (unsigned i = 0, e = OriginalArgs.size(); i != e; ++i) {
+ SmallString<128> EscapedArg;
+ EscapeSpacesAndBackslashes(OriginalArgs[i], EscapedArg);
Flags += " ";
- Flags += OriginalArgs[i];
+ Flags += EscapedArg;
}
CmdArgs.push_back("-dwarf-debug-flags");
CmdArgs.push_back(Args.MakeArgString(Flags.str()));
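Because the original driver arguments are re-joined into one space-separated -dwarf-debug-flags string, each of them is first run through EscapeSpacesAndBackslashes(), a helper defined earlier in this file but not shown in this hunk. Assuming it does what its name says, a standalone equivalent would be:

#include <string>

// Sketch: escape an argument before joining it into a single
// space-separated string such as the -dwarf-debug-flags value.
// Assumed behaviour: prefix every space and backslash with a backslash.
static std::string escapeSpacesAndBackslashes(const std::string &Arg) {
  std::string Out;
  Out.reserve(Arg.size());
  for (char C : Arg) {
    if (C == ' ' || C == '\\')
      Out += '\\';
    Out += C;
  }
  return Out;
}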
@@ -4376,7 +4493,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// can propagate it to the backend.
bool SplitDwarf = Args.hasArg(options::OPT_gsplit_dwarf) &&
getToolChain().getTriple().isOSLinux() &&
- (isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA));
+ (isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) ||
+ isa<BackendJobAction>(JA));
const char *SplitDwarfOut;
if (SplitDwarf) {
CmdArgs.push_back("-split-dwarf-file");
@@ -4388,18 +4506,19 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT__SLASH_fallback) &&
Output.getType() == types::TY_Object &&
(InputType == types::TY_C || InputType == types::TY_CXX)) {
- Command *CLCommand = getCLFallback()->GetCommand(C, JA, Output, Inputs,
- Args, LinkingOutput);
- C.addCommand(new FallbackCommand(JA, *this, Exec, CmdArgs, CLCommand));
+ auto CLCommand =
+ getCLFallback()->GetCommand(C, JA, Output, Inputs, Args, LinkingOutput);
+ C.addCommand(llvm::make_unique<FallbackCommand>(JA, *this, Exec, CmdArgs,
+ std::move(CLCommand)));
} else {
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
// Handle the debug info splitting at object creation time if we're
// creating an object.
// TODO: Currently only works on linux with newer objcopy.
- if (SplitDwarf && !isa<CompileJobAction>(JA))
+ if (SplitDwarf && !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA))
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output, SplitDwarfOut);
if (Arg *A = Args.getLastArg(options::OPT_pg))
@@ -4716,6 +4835,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// and "clang -emit-llvm -c foo.s"
Args.ClaimAllArgs(options::OPT_emit_llvm);
+ claimNoWarnArgs(Args);
+
// Invoke ourselves in -cc1as mode.
//
// FIXME: Implement custom jobs for internal actions.
@@ -4795,8 +4916,10 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = getToolChain().getDriver().getClangProgramPath();
Flags += Exec;
for (unsigned i = 0, e = OriginalArgs.size(); i != e; ++i) {
+ SmallString<128> EscapedArg;
+ EscapeSpacesAndBackslashes(OriginalArgs[i], EscapedArg);
Flags += " ";
- Flags += OriginalArgs[i];
+ Flags += EscapedArg;
}
CmdArgs.push_back("-dwarf-debug-flags");
CmdArgs.push_back(Args.MakeArgString(Flags.str()));
@@ -4827,7 +4950,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Input.getFilename());
const char *Exec = getToolChain().getDriver().getClangProgramPath();
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
// Handle the debug info splitting at object creation time if we're
// creating an object.
@@ -4838,6 +4961,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
SplitDebugName(Args, Inputs));
}
+void GnuTool::anchor() {}
+
void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -4870,19 +4995,10 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
RenderExtraToolArgs(JA, CmdArgs);
// If using a driver driver, force the arch.
- llvm::Triple::ArchType Arch = getToolChain().getArch();
if (getToolChain().getTriple().isOSDarwin()) {
CmdArgs.push_back("-arch");
-
- // FIXME: Remove these special cases.
- if (Arch == llvm::Triple::ppc)
- CmdArgs.push_back("ppc");
- else if (Arch == llvm::Triple::ppc64)
- CmdArgs.push_back("ppc64");
- else if (Arch == llvm::Triple::ppc64le)
- CmdArgs.push_back("ppc64le");
- else
- CmdArgs.push_back(Args.MakeArgString(getToolChain().getArchName()));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().getDefaultUniversalArchName()));
}
// Try to force gcc to match the tool chain we want, if we recognize
@@ -4890,6 +5006,7 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
//
// FIXME: The triple class should directly provide the information we want
// here.
+ llvm::Triple::ArchType Arch = getToolChain().getArch();
if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::ppc)
CmdArgs.push_back("-m32");
else if (Arch == llvm::Triple::x86_64 || Arch == llvm::Triple::ppc64 ||
@@ -4960,7 +5077,7 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void gcc::Preprocess::RenderExtraToolArgs(const JobAction &JA,
@@ -5000,6 +5117,7 @@ void hexagon::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
const Driver &D = getToolChain().getDriver();
ArgStringList CmdArgs;
@@ -5056,7 +5174,7 @@ void hexagon::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const char *GCCName = "hexagon-as";
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void hexagon::Link::RenderExtraToolArgs(const JobAction &JA,
@@ -5133,8 +5251,8 @@ void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA,
const std::string MarchSuffix = "/" + MarchString;
const std::string G0Suffix = "/G0";
const std::string MarchG0Suffix = MarchSuffix + G0Suffix;
- const std::string RootDir = toolchains::Hexagon_TC::GetGnuDir(D.InstalledDir)
- + "/";
+ const std::string RootDir =
+ toolchains::Hexagon_TC::GetGnuDir(D.InstalledDir, Args) + "/";
const std::string StartFilesDir = RootDir
+ "hexagon/lib"
+ (buildingLib
@@ -5222,7 +5340,8 @@ void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
std::string Linker = ToolChain.GetProgramPath("hexagon-ld");
- C.addCommand(new Command(JA, *this, Args.MakeArgString(Linker), CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
+ CmdArgs));
}
// Hexagon tools end.
@@ -5287,12 +5406,12 @@ const char *arm::getLLVMArchSuffixForARM(StringRef CPU) {
.Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "v6")
.Cases("arm1176jzf-s", "mpcorenovfp", "mpcore", "v6")
.Cases("arm1156t2-s", "arm1156t2f-s", "v6t2")
- .Cases("cortex-a5", "cortex-a7", "cortex-a8", "cortex-a9-mp", "v7")
- .Cases("cortex-a9", "cortex-a12", "cortex-a15", "krait", "v7")
+ .Cases("cortex-a5", "cortex-a7", "cortex-a8", "v7")
+ .Cases("cortex-a9", "cortex-a12", "cortex-a15", "cortex-a17", "krait", "v7")
.Cases("cortex-r4", "cortex-r5", "v7r")
.Case("cortex-m0", "v6m")
.Case("cortex-m3", "v7m")
- .Case("cortex-m4", "v7em")
+ .Cases("cortex-m4", "cortex-m7", "v7em")
.Case("swift", "v7s")
.Case("cyclone", "v8")
.Cases("cortex-a53", "cortex-a57", "v8")
@@ -5304,6 +5423,11 @@ bool mips::hasMipsAbiArg(const ArgList &Args, const char *Value) {
return A && (A->getValue() == StringRef(Value));
}
+bool mips::isUCLibc(const ArgList &Args) {
+ Arg *A = Args.getLastArg(options::OPT_m_libc_Group);
+ return A && A->getOption().matches(options::OPT_muclibc);
+}
+
bool mips::isNaN2008(const ArgList &Args, const llvm::Triple &Triple) {
if (Arg *NaNArg = Args.getLastArg(options::OPT_mnan_EQ))
return llvm::StringSwitch<bool>(NaNArg->getValue())
@@ -5360,8 +5484,9 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
.Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm)
.Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm)
.Cases("armv7s", "xscale", llvm::Triple::arm)
- .Case("arm64", llvm::Triple::arm64)
+ .Case("arm64", llvm::Triple::aarch64)
.Case("r600", llvm::Triple::r600)
+ .Case("amdgcn", llvm::Triple::amdgcn)
.Case("nvptx", llvm::Triple::nvptx)
.Case("nvptx64", llvm::Triple::nvptx64)
.Case("amdil", llvm::Triple::amdil)
@@ -5478,7 +5603,7 @@ void darwin::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void darwin::MachOTool::anchor() {}
@@ -5694,6 +5819,12 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
assert(Output.getType() == types::TY_Image && "Invalid linker output type.");
+ // If the number of arguments surpasses the system limits, we will encode the
+ // input files in a separate file, shortening the command line. To this end,
+ // build a list of input file names that can be passed via a file with the
+ // -filelist linker option.
+ llvm::opt::ArgStringList InputFileList;
+
// The logic here is derived from gcc's behavior; most of which
// comes from specs (starting with link_command). Consult gcc for
// more information.
@@ -5707,7 +5838,7 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("touch"));
CmdArgs.push_back(Output.getFilename());
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
return;
}
@@ -5762,7 +5893,23 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
-
+ // Build the input file for -filelist (list of linker input files) in case we
+ // need it later
+ for (const auto &II : Inputs) {
+ if (!II.isFilename()) {
+ // This is a linker input argument.
+ // We cannot mix linker input arguments and file names in a -filelist input,
+ // so we stop the list here (the remaining files are passed as ordinary
+ // arguments).
+ if (InputFileList.size() > 0)
+ break;
+
+ continue;
+ }
+
+ InputFileList.push_back(II.getFilename());
+ }
+
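The loop above gathers plain file inputs for a possible -filelist response file, but a -filelist cannot interleave with ordinary linker arguments, so collection stops at the first non-file input once any file has been recorded; non-file inputs seen before the first file are simply left on the command line. A standalone sketch over a simplified input list:

#include <string>
#include <vector>

struct LinkerInput {
  bool IsFilename;
  std::string Value;
};

// Sketch of the -filelist collection above: gather leading file inputs,
// skip non-file arguments seen before any file, and stop at the first
// non-file argument once files have been collected.
static std::vector<std::string>
collectFilelist(const std::vector<LinkerInput> &Inputs) {
  std::vector<std::string> FileList;
  for (const LinkerInput &I : Inputs) {
    if (!I.IsFilename) {
      if (!FileList.empty())
        break;    // cannot mix arguments and files in one -filelist
      continue;   // argument before any file: leave it on the command line
    }
    FileList.push_back(I.Value);
  }
  return FileList;
}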
if (isObjCRuntimeLinked(Args) &&
!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
@@ -5805,7 +5952,10 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ std::unique_ptr<Command> Cmd =
+ llvm::make_unique<Command>(JA, *this, Exec, CmdArgs);
+ Cmd->setInputFileList(std::move(InputFileList));
+ C.addCommand(std::move(Cmd));
}
void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
@@ -5827,7 +5977,7 @@ void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("lipo"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
@@ -5847,7 +5997,7 @@ void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("dsymutil"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
@@ -5870,7 +6020,7 @@ void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("dwarfdump"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void solaris::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
@@ -5878,6 +6028,7 @@ void solaris::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
ArgStringList CmdArgs;
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
@@ -5890,7 +6041,7 @@ void solaris::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void solaris::Link::ConstructJob(Compilation &C, const JobAction &JA,
@@ -5995,115 +6146,7 @@ void solaris::Link::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
-}
-
-void auroraux::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- ArgStringList CmdArgs;
-
- Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
- options::OPT_Xassembler);
-
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
-
- for (const auto &II : Inputs)
- CmdArgs.push_back(II.getFilename());
-
- const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("gas"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
-}
-
-void auroraux::Link::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- ArgStringList CmdArgs;
-
- if ((!Args.hasArg(options::OPT_nostdlib)) &&
- (!Args.hasArg(options::OPT_shared))) {
- CmdArgs.push_back("-e");
- CmdArgs.push_back("_start");
- }
-
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("-Bstatic");
- CmdArgs.push_back("-dn");
- } else {
-// CmdArgs.push_back("--eh-frame-hdr");
- CmdArgs.push_back("-Bdynamic");
- if (Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back("-shared");
- } else {
- CmdArgs.push_back("--dynamic-linker");
- CmdArgs.push_back("/lib/ld.so.1"); // 64Bit Path /lib/amd64/ld.so.1
- }
- }
-
- if (Output.isFilename()) {
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
- }
-
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
- if (!Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crt1.o")));
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crti.o")));
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crtbegin.o")));
- } else {
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crti.o")));
- }
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crtn.o")));
- }
-
- CmdArgs.push_back(Args.MakeArgString("-L/opt/gcc4/lib/gcc/"
- + getToolChain().getTripleString()
- + "/4.2.4"));
-
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
-
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
-
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
- // FIXME: For some reason GCC passes -lgcc before adding
- // the default system libraries. Just mimic this for now.
- CmdArgs.push_back("-lgcc");
-
- if (Args.hasArg(options::OPT_pthread))
- CmdArgs.push_back("-pthread");
- if (!Args.hasArg(options::OPT_shared))
- CmdArgs.push_back("-lc");
- CmdArgs.push_back("-lgcc");
- }
-
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
- if (!Args.hasArg(options::OPT_shared))
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crtend.o")));
- }
-
- addProfileRT(getToolChain(), Args, CmdArgs);
-
- const char *Exec =
- Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void openbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6111,6 +6154,7 @@ void openbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
ArgStringList CmdArgs;
bool NeedsKPIC = false;
@@ -6173,7 +6217,7 @@ void openbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6305,7 +6349,7 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void bitrig::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6313,6 +6357,7 @@ void bitrig::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
ArgStringList CmdArgs;
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
@@ -6325,7 +6370,7 @@ void bitrig::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void bitrig::Link::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6441,7 +6486,7 @@ void bitrig::Link::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6449,6 +6494,7 @@ void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
ArgStringList CmdArgs;
// When building 32-bit code on FreeBSD/amd64, we have to explicitly
@@ -6522,7 +6568,7 @@ void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6621,7 +6667,7 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddAllArgs(CmdArgs, options::OPT_L);
- const ToolChain::path_list Paths = ToolChain.getFilePaths();
+ const ToolChain::path_list &Paths = ToolChain.getFilePaths();
for (const auto &Path : Paths)
CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path));
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
@@ -6634,6 +6680,7 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (D.IsUsingLTO(Args))
AddGoldPlugin(ToolChain, Args, CmdArgs);
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib) &&
@@ -6645,6 +6692,8 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("-lm");
}
+ if (NeedsSanitizerDeps)
+ linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
// FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
// the default system libraries. Just mimic this for now.
if (Args.hasArg(options::OPT_pg))
@@ -6699,13 +6748,11 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
- addSanitizerRuntimes(getToolChain(), Args, CmdArgs);
-
addProfileRT(ToolChain, Args, CmdArgs);
const char *Exec =
Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void netbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6713,6 +6760,7 @@ void netbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
ArgStringList CmdArgs;
// GNU as needs different flags for creating the correct output format
@@ -6779,7 +6827,7 @@ void netbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString((getToolChain().GetProgramPath("as")));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void netbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6815,9 +6863,7 @@ void netbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("elf_i386");
break;
case llvm::Triple::arm:
- case llvm::Triple::armeb:
case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
CmdArgs.push_back("-m");
switch (getToolChain().getTriple().getEnvironment()) {
case llvm::Triple::EABI:
@@ -6833,6 +6879,23 @@ void netbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
break;
}
break;
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ CmdArgs.push_back("-m");
+ switch (getToolChain().getTriple().getEnvironment()) {
+ case llvm::Triple::EABI:
+ case llvm::Triple::GNUEABI:
+ CmdArgs.push_back("armelfb_nbsd_eabi");
+ break;
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABIHF:
+ CmdArgs.push_back("armelfb_nbsd_eabihf");
+ break;
+ default:
+ CmdArgs.push_back("armelfb_nbsd");
+ break;
+ }
+ break;
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
if (mips::hasMipsAbiArg(Args, "32")) {
@@ -6849,6 +6912,16 @@ void netbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("elf64ltsmip");
}
break;
+ case llvm::Triple::ppc:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf32ppc_nbsd");
+ break;
+
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf64ppc");
+ break;
case llvm::Triple::sparc:
CmdArgs.push_back("-m");
@@ -6901,12 +6974,16 @@ void netbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
unsigned Major, Minor, Micro;
getToolChain().getTriple().getOSVersion(Major, Minor, Micro);
bool useLibgcc = true;
- if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 40) || Major == 0) {
+ if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 49) || Major == 0) {
switch(getToolChain().getArch()) {
+ case llvm::Triple::aarch64:
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
useLibgcc = false;
@@ -6958,7 +7035,7 @@ void netbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
addProfileRT(getToolChain(), Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
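The NetBSD linker hunk above both raises the version cutoff (6.99.40 to 6.99.49) and extends the set of architectures that no longer link against libgcc. The following is a small standalone model of that gate with an abbreviated architecture list; the enum and function names are invented for this sketch and are not part of the clang driver.

// Standalone model of the useLibgcc decision above (abbreviated arch list).
enum class Arch { AArch64, ARM, PPC, PPC64, X86, X86_64, Sparc, Other };

static bool useLibgcc(unsigned Major, unsigned Minor, unsigned Micro, Arch A) {
  bool NewEnough =
      Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 49) || Major == 0;
  if (!NewEnough)
    return true;
  switch (A) {
  case Arch::AArch64:
  case Arch::ARM:
  case Arch::PPC:
  case Arch::PPC64:
  case Arch::X86:
  case Arch::X86_64:
    return false; // new enough and a supported arch: libgcc not needed
  default:
    return true;
  }
}

// e.g. useLibgcc(6, 99, 48, Arch::X86_64) -> true
//      useLibgcc(7, 0, 0, Arch::PPC64)    -> false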
void gnutools::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6966,6 +7043,8 @@ void gnutools::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+
ArgStringList CmdArgs;
bool NeedsKPIC = false;
@@ -7131,7 +7210,7 @@ void gnutools::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
// Handle the debug info splitting at object creation time if we're
// creating an object.
@@ -7175,8 +7254,8 @@ static void AddLibgcc(const llvm::Triple &Triple, const Driver &D,
CmdArgs.push_back("-ldl");
}
-static StringRef getLinuxDynamicLinker(const ArgList &Args,
- const toolchains::Linux &ToolChain) {
+static std::string getLinuxDynamicLinker(const ArgList &Args,
+ const toolchains::Linux &ToolChain) {
if (ToolChain.getTriple().getEnvironment() == llvm::Triple::Android) {
if (ToolChain.getTriple().isArch64Bit())
return "/system/bin/linker64";
@@ -7185,11 +7264,9 @@ static StringRef getLinuxDynamicLinker(const ArgList &Args,
} else if (ToolChain.getArch() == llvm::Triple::x86 ||
ToolChain.getArch() == llvm::Triple::sparc)
return "/lib/ld-linux.so.2";
- else if (ToolChain.getArch() == llvm::Triple::aarch64 ||
- ToolChain.getArch() == llvm::Triple::arm64)
+ else if (ToolChain.getArch() == llvm::Triple::aarch64)
return "/lib/ld-linux-aarch64.so.1";
- else if (ToolChain.getArch() == llvm::Triple::aarch64_be ||
- ToolChain.getArch() == llvm::Triple::arm64_be)
+ else if (ToolChain.getArch() == llvm::Triple::aarch64_be)
return "/lib/ld-linux-aarch64_be.so.1";
else if (ToolChain.getArch() == llvm::Triple::arm ||
ToolChain.getArch() == llvm::Triple::thumb) {
@@ -7204,24 +7281,38 @@ static StringRef getLinuxDynamicLinker(const ArgList &Args,
else
return "/lib/ld-linux.so.3"; /* TODO: check which dynamic linker name. */
} else if (ToolChain.getArch() == llvm::Triple::mips ||
- ToolChain.getArch() == llvm::Triple::mipsel) {
- if (mips::isNaN2008(Args, ToolChain.getTriple()))
- return "/lib/ld-linux-mipsn8.so.1";
- return "/lib/ld.so.1";
- } else if (ToolChain.getArch() == llvm::Triple::mips64 ||
+ ToolChain.getArch() == llvm::Triple::mipsel ||
+ ToolChain.getArch() == llvm::Triple::mips64 ||
ToolChain.getArch() == llvm::Triple::mips64el) {
- if (mips::hasMipsAbiArg(Args, "n32"))
- return mips::isNaN2008(Args, ToolChain.getTriple())
- ? "/lib32/ld-linux-mipsn8.so.1" : "/lib32/ld.so.1";
- return mips::isNaN2008(Args, ToolChain.getTriple())
- ? "/lib64/ld-linux-mipsn8.so.1" : "/lib64/ld.so.1";
+ StringRef CPUName;
+ StringRef ABIName;
+ mips::getMipsCPUAndABI(Args, ToolChain.getTriple(), CPUName, ABIName);
+ bool IsNaN2008 = mips::isNaN2008(Args, ToolChain.getTriple());
+
+ StringRef LibDir = llvm::StringSwitch<llvm::StringRef>(ABIName)
+ .Case("o32", "/lib")
+ .Case("n32", "/lib32")
+ .Case("n64", "/lib64")
+ .Default("/lib");
+ StringRef LibName;
+ if (mips::isUCLibc(Args))
+ LibName = IsNaN2008 ? "ld-uClibc-mipsn8.so.0" : "ld-uClibc.so.0";
+ else
+ LibName = IsNaN2008 ? "ld-linux-mipsn8.so.1" : "ld.so.1";
+
+ return (LibDir + "/" + LibName).str();
} else if (ToolChain.getArch() == llvm::Triple::ppc)
return "/lib/ld.so.1";
- else if (ToolChain.getArch() == llvm::Triple::ppc64 ||
- ToolChain.getArch() == llvm::Triple::systemz)
+ else if (ToolChain.getArch() == llvm::Triple::ppc64) {
+ if (ppc::hasPPCAbiArg(Args, "elfv2"))
+ return "/lib64/ld64.so.2";
return "/lib64/ld64.so.1";
- else if (ToolChain.getArch() == llvm::Triple::ppc64le)
+ } else if (ToolChain.getArch() == llvm::Triple::ppc64le) {
+ if (ppc::hasPPCAbiArg(Args, "elfv1"))
+ return "/lib64/ld64.so.1";
return "/lib64/ld64.so.2";
+ } else if (ToolChain.getArch() == llvm::Triple::systemz)
+ return "/lib64/ld64.so.1";
else if (ToolChain.getArch() == llvm::Triple::sparcv9)
return "/lib64/ld-linux.so.2";
else if (ToolChain.getArch() == llvm::Triple::x86_64 &&
@@ -7232,13 +7323,19 @@ static StringRef getLinuxDynamicLinker(const ArgList &Args,
}
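The MIPS branch rewritten above now derives the loader path from the ABI (which picks the lib directory) and from the NaN-2008 and uClibc settings (which pick the loader name). A standalone model of that composition, with plain parameters in place of the real ArgList/Triple queries:

#include <string>

// Standalone model of the MIPS dynamic-linker selection above; the parameter
// names are this sketch's own, not the clang driver API.
static std::string mipsDynamicLinker(const std::string &ABI, bool IsNaN2008,
                                     bool IsUCLibc) {
  std::string LibDir = ABI == "n32"   ? "/lib32"
                       : ABI == "n64" ? "/lib64"
                                      : "/lib"; // o32 and anything else
  std::string LibName;
  if (IsUCLibc)
    LibName = IsNaN2008 ? "ld-uClibc-mipsn8.so.0" : "ld-uClibc.so.0";
  else
    LibName = IsNaN2008 ? "ld-linux-mipsn8.so.1" : "ld.so.1";
  return LibDir + "/" + LibName;
}

// e.g. mipsDynamicLinker("n32", /*IsNaN2008=*/true, /*IsUCLibc=*/false)
//        -> "/lib32/ld-linux-mipsn8.so.1"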
static void AddRunTimeLibs(const ToolChain &TC, const Driver &D,
- ArgStringList &CmdArgs, const ArgList &Args) {
+ ArgStringList &CmdArgs, const ArgList &Args) {
// Make use of compiler-rt if --rtlib option is used
ToolChain::RuntimeLibType RLT = TC.GetRuntimeLibType(Args);
- switch(RLT) {
+ switch (RLT) {
case ToolChain::RLT_CompilerRT:
- addClangRTLinux(TC, Args, CmdArgs);
+ switch (TC.getTriple().getOS()) {
+ default: llvm_unreachable("unsupported OS");
+ case llvm::Triple::Win32:
+ case llvm::Triple::Linux:
+ addClangRT(TC, Args, CmdArgs);
+ break;
+ }
break;
case ToolChain::RLT_Libgcc:
AddLibgcc(TC.getTriple(), D, CmdArgs, Args);
@@ -7246,6 +7343,53 @@ static void AddRunTimeLibs(const ToolChain &TC, const Driver &D,
}
}
+static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
+ switch (T.getArch()) {
+ case llvm::Triple::x86:
+ return "elf_i386";
+ case llvm::Triple::aarch64:
+ return "aarch64linux";
+ case llvm::Triple::aarch64_be:
+ return "aarch64_be_linux";
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ return "armelf_linux_eabi";
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ return "armebelf_linux_eabi"; /* TODO: check which NAME. */
+ case llvm::Triple::ppc:
+ return "elf32ppclinux";
+ case llvm::Triple::ppc64:
+ return "elf64ppc";
+ case llvm::Triple::ppc64le:
+ return "elf64lppc";
+ case llvm::Triple::sparc:
+ return "elf32_sparc";
+ case llvm::Triple::sparcv9:
+ return "elf64_sparc";
+ case llvm::Triple::mips:
+ return "elf32btsmip";
+ case llvm::Triple::mipsel:
+ return "elf32ltsmip";
+ case llvm::Triple::mips64:
+ if (mips::hasMipsAbiArg(Args, "n32"))
+ return "elf32btsmipn32";
+ return "elf64btsmip";
+ case llvm::Triple::mips64el:
+ if (mips::hasMipsAbiArg(Args, "n32"))
+ return "elf32ltsmipn32";
+ return "elf64ltsmip";
+ case llvm::Triple::systemz:
+ return "elf64_s390";
+ case llvm::Triple::x86_64:
+ if (T.getEnvironment() == llvm::Triple::GNUX32)
+ return "elf32_x86_64";
+ return "elf_x86_64";
+ default:
+ llvm_unreachable("Unexpected arch");
+ }
+}
+
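getLDMOption above folds the long if/else chain that a later hunk deletes from gnutools::Link::ConstructJob into a single switch mapping the target triple to GNU ld's -m emulation name. A reduced standalone version covering a few of the emulations, plus the shape of the new call site:

#include <string>

// Reduced standalone version of getLDMOption (a handful of cases only).
static const char *ldEmulationFor(const std::string &Arch, bool GNUX32 = false) {
  if (Arch == "x86")
    return "elf_i386";
  if (Arch == "aarch64")
    return "aarch64linux";
  if (Arch == "ppc64le")
    return "elf64lppc";
  if (Arch == "x86_64")
    return GNUX32 ? "elf32_x86_64" : "elf_x86_64";
  return "elf_x86_64"; // fallback for this sketch only
}

// The call site in gnutools::Link then reduces to two pushes:
//   CmdArgs.push_back("-m");
//   CmdArgs.push_back(getLDMOption(ToolChain.getTriple(), Args));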
void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -7295,53 +7439,7 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
CmdArgs.push_back("-m");
- if (ToolChain.getArch() == llvm::Triple::x86)
- CmdArgs.push_back("elf_i386");
- else if (ToolChain.getArch() == llvm::Triple::aarch64 ||
- ToolChain.getArch() == llvm::Triple::arm64)
- CmdArgs.push_back("aarch64linux");
- else if (ToolChain.getArch() == llvm::Triple::aarch64_be ||
- ToolChain.getArch() == llvm::Triple::arm64_be)
- CmdArgs.push_back("aarch64_be_linux");
- else if (ToolChain.getArch() == llvm::Triple::arm
- || ToolChain.getArch() == llvm::Triple::thumb)
- CmdArgs.push_back("armelf_linux_eabi");
- else if (ToolChain.getArch() == llvm::Triple::armeb
- || ToolChain.getArch() == llvm::Triple::thumbeb)
- CmdArgs.push_back("armebelf_linux_eabi"); /* TODO: check which NAME. */
- else if (ToolChain.getArch() == llvm::Triple::ppc)
- CmdArgs.push_back("elf32ppclinux");
- else if (ToolChain.getArch() == llvm::Triple::ppc64)
- CmdArgs.push_back("elf64ppc");
- else if (ToolChain.getArch() == llvm::Triple::ppc64le)
- CmdArgs.push_back("elf64lppc");
- else if (ToolChain.getArch() == llvm::Triple::sparc)
- CmdArgs.push_back("elf32_sparc");
- else if (ToolChain.getArch() == llvm::Triple::sparcv9)
- CmdArgs.push_back("elf64_sparc");
- else if (ToolChain.getArch() == llvm::Triple::mips)
- CmdArgs.push_back("elf32btsmip");
- else if (ToolChain.getArch() == llvm::Triple::mipsel)
- CmdArgs.push_back("elf32ltsmip");
- else if (ToolChain.getArch() == llvm::Triple::mips64) {
- if (mips::hasMipsAbiArg(Args, "n32"))
- CmdArgs.push_back("elf32btsmipn32");
- else
- CmdArgs.push_back("elf64btsmip");
- }
- else if (ToolChain.getArch() == llvm::Triple::mips64el) {
- if (mips::hasMipsAbiArg(Args, "n32"))
- CmdArgs.push_back("elf32ltsmipn32");
- else
- CmdArgs.push_back("elf64ltsmip");
- }
- else if (ToolChain.getArch() == llvm::Triple::systemz)
- CmdArgs.push_back("elf64_s390");
- else if (ToolChain.getArch() == llvm::Triple::x86_64 &&
- ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUX32)
- CmdArgs.push_back("elf32_x86_64");
- else
- CmdArgs.push_back("elf_x86_64");
+ CmdArgs.push_back(getLDMOption(ToolChain.getTriple(), Args));
if (Args.hasArg(options::OPT_static)) {
if (ToolChain.getArch() == llvm::Triple::arm ||
@@ -7405,7 +7503,7 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_u);
- const ToolChain::path_list Paths = ToolChain.getFilePaths();
+ const ToolChain::path_list &Paths = ToolChain.getFilePaths();
for (const auto &Path : Paths)
CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path));
@@ -7416,9 +7514,8 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
-
- addSanitizerRuntimes(getToolChain(), Args, CmdArgs);
// The profile runtime also needs access to system libraries.
addProfileRT(getToolChain(), Args, CmdArgs);
@@ -7440,6 +7537,9 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("--start-group");
+ if (NeedsSanitizerDeps)
+ linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+
LibOpenMP UsedOpenMPLib = LibUnknown;
if (Args.hasArg(options::OPT_fopenmp)) {
UsedOpenMPLib = LibGOMP;
@@ -7496,7 +7596,8 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- C.addCommand(new Command(JA, *this, ToolChain.Linker.c_str(), CmdArgs));
+ C.addCommand(
+ llvm::make_unique<Command>(JA, *this, ToolChain.Linker.c_str(), CmdArgs));
}
void minix::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7504,6 +7605,7 @@ void minix::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
ArgStringList CmdArgs;
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
@@ -7515,7 +7617,7 @@ void minix::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void minix::Link::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7569,7 +7671,7 @@ void minix::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
/// DragonFly Tools
@@ -7581,6 +7683,7 @@ void dragonfly::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
ArgStringList CmdArgs;
// When building 32-bit code on DragonFly/pc64, we have to explicitly
@@ -7597,7 +7700,7 @@ void dragonfly::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7605,12 +7708,9 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- bool UseGCC47 = false;
const Driver &D = getToolChain().getDriver();
ArgStringList CmdArgs;
-
- if (llvm::sys::fs::exists("/usr/lib/gcc47", UseGCC47))
- UseGCC47 = false;
+ bool UseGCC47 = llvm::sys::fs::exists("/usr/lib/gcc47");
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
@@ -7747,16 +7847,27 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
addProfileRT(getToolChain(), Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
-}
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+}
+
+// Try to find Exe from a Visual Studio distribution. This first tries to find
+// an installed copy of Visual Studio and, failing that, looks in the PATH,
+// making sure that whatever executable is found is not a same-named exe
+// from clang itself to prevent clang from falling back to itself.
+static std::string FindVisualStudioExecutable(const ToolChain &TC,
+ const char *Exe,
+ const char *ClangProgramPath) {
+ const auto &MSVC = static_cast<const toolchains::MSVCToolChain &>(TC);
+ std::string visualStudioBinDir;
+ if (MSVC.getVisualStudioBinariesFolder(ClangProgramPath,
+ visualStudioBinDir)) {
+ SmallString<128> FilePath(visualStudioBinDir);
+ llvm::sys::path::append(FilePath, Exe);
+ if (llvm::sys::fs::can_execute(FilePath.c_str()))
+ return FilePath.str();
+ }
-static void addSanitizerRTWindows(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs,
- const StringRef RTName) {
- SmallString<128> LibSanitizer(getCompilerRTLibDir(TC));
- llvm::sys::path::append(LibSanitizer,
- Twine("clang_rt.") + RTName + ".lib");
- CmdArgs.push_back(Args.MakeArgString(LibSanitizer));
+ return Exe;
}
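FindVisualStudioExecutable above prefers a binary from the detected Visual Studio bin directory and otherwise returns the bare name, so the ordinary PATH search applies when the job runs; the check against clang's own program path happens inside getVisualStudioBinariesFolder. A standalone sketch of that fallback strategy, with trivial stub probes in place of the real registry and filesystem queries:

#include <string>

// Trivial stand-ins for the real probes (registry/env lookup and
// llvm::sys::fs::can_execute); placeholders for this sketch only.
static bool findVSBinDir(std::string &Dir) {
  Dir = "C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\bin";
  return false; // pretend no installation was found
}
static bool isExecutable(const std::string &) { return false; }

static std::string findVSTool(const char *Exe) {
  std::string Dir;
  if (findVSBinDir(Dir)) {
    std::string Path = Dir + "\\" + Exe;
    if (isExecutable(Path))
      return Path; // use the installed copy
  }
  return Exe; // fall back to a plain PATH lookup at execution time
}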
void visualstudio::Link::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7765,28 +7876,56 @@ void visualstudio::Link::ConstructJob(Compilation &C, const JobAction &JA,
const ArgList &Args,
const char *LinkingOutput) const {
ArgStringList CmdArgs;
+ const ToolChain &TC = getToolChain();
- if (Output.isFilename()) {
+ assert((Output.isFilename() || Output.isNothing()) && "invalid output");
+ if (Output.isFilename())
CmdArgs.push_back(Args.MakeArgString(std::string("-out:") +
Output.getFilename()));
- } else {
- assert(Output.isNothing() && "Invalid output.");
- }
if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles) &&
- !C.getDriver().IsCLMode()) {
+ !Args.hasArg(options::OPT_nostartfiles) && !C.getDriver().IsCLMode())
CmdArgs.push_back("-defaultlib:libcmt");
+
+ if (!llvm::sys::Process::GetEnv("LIB")) {
+ // If the VC environment hasn't been configured (perhaps because the user
+ // did not run vcvarsall), try to build a consistent link environment. If
+    // the environment variable is set, however, assume the user knows what
+    // they're doing.
+ std::string VisualStudioDir;
+ const auto &MSVC = static_cast<const toolchains::MSVCToolChain &>(TC);
+ if (MSVC.getVisualStudioInstallDir(VisualStudioDir)) {
+ SmallString<128> LibDir(VisualStudioDir);
+ llvm::sys::path::append(LibDir, "VC", "lib");
+ switch (MSVC.getArch()) {
+ case llvm::Triple::x86:
+ // x86 just puts the libraries directly in lib
+ break;
+ case llvm::Triple::x86_64:
+ llvm::sys::path::append(LibDir, "amd64");
+ break;
+ case llvm::Triple::arm:
+ llvm::sys::path::append(LibDir, "arm");
+ break;
+ default:
+ break;
+ }
+ CmdArgs.push_back(
+ Args.MakeArgString(std::string("-libpath:") + LibDir.c_str()));
+ }
+
+ std::string WindowsSdkLibPath;
+ if (MSVC.getWindowsSDKLibraryPath(WindowsSdkLibPath))
+ CmdArgs.push_back(Args.MakeArgString(std::string("-libpath:") +
+ WindowsSdkLibPath.c_str()));
}
CmdArgs.push_back("-nologo");
- if (Args.hasArg(options::OPT_g_Group)) {
+ if (Args.hasArg(options::OPT_g_Group))
CmdArgs.push_back("-debug");
- }
bool DLL = Args.hasArg(options::OPT__SLASH_LD, options::OPT__SLASH_LDd);
-
if (DLL) {
CmdArgs.push_back(Args.MakeArgString("-dll"));
@@ -7796,32 +7935,81 @@ void visualstudio::Link::ConstructJob(Compilation &C, const JobAction &JA,
ImplibName.str()));
}
- if (getToolChain().getSanitizerArgs().needsAsanRt()) {
+ if (TC.getSanitizerArgs().needsAsanRt()) {
CmdArgs.push_back(Args.MakeArgString("-debug"));
CmdArgs.push_back(Args.MakeArgString("-incremental:no"));
- // FIXME: Handle 64-bit.
- if (DLL) {
- addSanitizerRTWindows(getToolChain(), Args, CmdArgs,
- "asan_dll_thunk-i386");
+ if (Args.hasArg(options::OPT__SLASH_MD, options::OPT__SLASH_MDd)) {
+ static const char *CompilerRTComponents[] = {
+ "asan_dynamic",
+ "asan_dynamic_runtime_thunk",
+ };
+ for (const auto &Component : CompilerRTComponents)
+ CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, Component)));
+ // Make sure the dynamic runtime thunk is not optimized out at link time
+ // to ensure proper SEH handling.
+ CmdArgs.push_back(Args.MakeArgString("-include:___asan_seh_interceptor"));
+ } else if (DLL) {
+ CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, "asan_dll_thunk")));
} else {
- addSanitizerRTWindows(getToolChain(), Args, CmdArgs, "asan-i386");
- addSanitizerRTWindows(getToolChain(), Args, CmdArgs, "asan_cxx-i386");
+ static const char *CompilerRTComponents[] = {
+ "asan",
+ "asan_cxx",
+ };
+ for (const auto &Component : CompilerRTComponents)
+ CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, Component)));
}
}
- Args.AddAllArgValues(CmdArgs, options::OPT_l);
Args.AddAllArgValues(CmdArgs, options::OPT__SLASH_link);
- // Add filenames immediately.
- for (const auto &Input : Inputs)
- if (Input.isFilename())
+ // Add filenames, libraries, and other linker inputs.
+ for (const auto &Input : Inputs) {
+ if (Input.isFilename()) {
CmdArgs.push_back(Input.getFilename());
- else
- Input.getInputArg().renderAsInput(Args, CmdArgs);
+ continue;
+ }
- const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("link.exe"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ const Arg &A = Input.getInputArg();
+
+ // Render -l options differently for the MSVC linker.
+ if (A.getOption().matches(options::OPT_l)) {
+ StringRef Lib = A.getValue();
+ const char *LinkLibArg;
+ if (Lib.endswith(".lib"))
+ LinkLibArg = Args.MakeArgString(Lib);
+ else
+ LinkLibArg = Args.MakeArgString(Lib + ".lib");
+ CmdArgs.push_back(LinkLibArg);
+ continue;
+ }
+
+ // Otherwise, this is some other kind of linker input option like -Wl, -z,
+ // or -L. Render it, even if MSVC doesn't understand it.
+ A.renderAsInput(Args, CmdArgs);
+ }
+
+ // We need to special case some linker paths. In the case of lld, we need to
+ // translate 'lld' into 'lld-link', and in the case of the regular msvc
+ // linker, we need to use a special search algorithm.
+ llvm::SmallString<128> linkPath;
+ StringRef Linker = Args.getLastArgValue(options::OPT_fuse_ld_EQ, "link");
+ if (Linker.equals_lower("lld"))
+ Linker = "lld-link";
+
+ if (Linker.equals_lower("link")) {
+ // If we're using the MSVC linker, it's not sufficient to just use link
+ // from the program PATH, because other environments like GnuWin32 install
+ // their own link.exe which may come first.
+ linkPath = FindVisualStudioExecutable(TC, "link.exe",
+ C.getDriver().getClangProgramPath());
+ } else {
+ linkPath = Linker;
+ llvm::sys::path::replace_extension(linkPath, "exe");
+ linkPath = TC.GetProgramPath(linkPath.c_str());
+ }
+
+ const char *Exec = Args.MakeArgString(linkPath);
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
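Two behaviours added above are worth calling out: -l inputs are rewritten into MSVC-style library names, and -fuse-ld=lld is translated into lld-link before the program path is resolved. A standalone model of the -l rewriting:

#include <string>

// Standalone model of the -l handling above: "-lkernel32" becomes
// "kernel32.lib", while an argument already ending in ".lib" passes through.
static std::string renderMSVCLib(const std::string &Lib) {
  const std::string Suffix = ".lib";
  if (Lib.size() >= Suffix.size() &&
      Lib.compare(Lib.size() - Suffix.size(), Suffix.size(), Suffix) == 0)
    return Lib;
  return Lib + Suffix;
}

// e.g. renderMSVCLib("kernel32")     -> "kernel32.lib"
//      renderMSVCLib("kernel32.lib") -> "kernel32.lib"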
void visualstudio::Compile::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7832,40 +8020,10 @@ void visualstudio::Compile::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(GetCommand(C, JA, Output, Inputs, Args, LinkingOutput));
}
-// Try to find FallbackName on PATH that is not identical to ClangProgramPath.
-// If one cannot be found, return FallbackName.
-// We do this special search to prevent clang-cl from falling back onto itself
-// if it's available as cl.exe on the path.
-static std::string FindFallback(const char *FallbackName,
- const char *ClangProgramPath) {
- llvm::Optional<std::string> OptPath = llvm::sys::Process::GetEnv("PATH");
- if (!OptPath.hasValue())
- return FallbackName;
-
- const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
- SmallVector<StringRef, 8> PathSegments;
- llvm::SplitString(OptPath.getValue(), PathSegments, EnvPathSeparatorStr);
-
- for (size_t i = 0, e = PathSegments.size(); i != e; ++i) {
- const StringRef &PathSegment = PathSegments[i];
- if (PathSegment.empty())
- continue;
-
- SmallString<128> FilePath(PathSegment);
- llvm::sys::path::append(FilePath, FallbackName);
- if (llvm::sys::fs::can_execute(Twine(FilePath)) &&
- !llvm::sys::fs::equivalent(Twine(FilePath), ClangProgramPath))
- return FilePath.str();
- }
-
- return FallbackName;
-}
-
-Command *visualstudio::Compile::GetCommand(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
+std::unique_ptr<Command> visualstudio::Compile::GetCommand(
+ Compilation &C, const JobAction &JA, const InputInfo &Output,
+ const InputInfoList &Inputs, const ArgList &Args,
+ const char *LinkingOutput) const {
ArgStringList CmdArgs;
CmdArgs.push_back("/nologo");
CmdArgs.push_back("/c"); // Compile only.
@@ -7943,8 +8101,10 @@ Command *visualstudio::Compile::GetCommand(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Fo);
const Driver &D = getToolChain().getDriver();
- std::string Exec = FindFallback("cl.exe", D.getClangProgramPath());
- return new Command(JA, *this, Args.MakeArgString(Exec), CmdArgs);
+ std::string Exec = FindVisualStudioExecutable(getToolChain(), "cl.exe",
+ D.getClangProgramPath());
+ return llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
+ CmdArgs);
}
@@ -7956,6 +8116,7 @@ void XCore::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
ArgStringList CmdArgs;
CmdArgs.push_back("-o");
@@ -7981,7 +8142,7 @@ void XCore::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
void XCore::Link::ConstructJob(Compilation &C, const JobAction &JA,
@@ -8001,12 +8162,189 @@ void XCore::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_v))
CmdArgs.push_back("-v");
- ExceptionSettings EH = exceptionSettings(Args, getToolChain().getTriple());
- if (EH.ShouldUseExceptionTables)
+ if (exceptionSettings(Args, getToolChain().getTriple()))
CmdArgs.push_back("-fexceptions");
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
- C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+}
+
+void CrossWindows::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ const auto &TC =
+ static_cast<const toolchains::CrossWindowsToolChain &>(getToolChain());
+ ArgStringList CmdArgs;
+ const char *Exec;
+
+ switch (TC.getArch()) {
+ default: llvm_unreachable("unsupported architecture");
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ break;
+ case llvm::Triple::x86:
+ CmdArgs.push_back("--32");
+ break;
+ case llvm::Triple::x86_64:
+ CmdArgs.push_back("--64");
+ break;
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &Input : Inputs)
+ CmdArgs.push_back(Input.getFilename());
+
+ const std::string Assembler = TC.GetProgramPath("as");
+ Exec = Args.MakeArgString(Assembler);
+
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+}
+
+void CrossWindows::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const auto &TC =
+ static_cast<const toolchains::CrossWindowsToolChain &>(getToolChain());
+ const llvm::Triple &T = TC.getTriple();
+ const Driver &D = TC.getDriver();
+ SmallString<128> EntryPoint;
+ ArgStringList CmdArgs;
+ const char *Exec;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_w);
+ // Other warning options are already handled somewhere else.
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back("-pie");
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ if (Args.hasArg(options::OPT_s))
+ CmdArgs.push_back("--strip-all");
+
+ CmdArgs.push_back("-m");
+ switch (TC.getArch()) {
+ default: llvm_unreachable("unsupported architecture");
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ // FIXME: this is incorrect for WinCE
+ CmdArgs.push_back("thumb2pe");
+ break;
+ case llvm::Triple::x86:
+ CmdArgs.push_back("i386pe");
+ EntryPoint.append("_");
+ break;
+ case llvm::Triple::x86_64:
+ CmdArgs.push_back("i386pep");
+ break;
+ }
+
+ if (Args.hasArg(options::OPT_shared)) {
+ switch (T.getArch()) {
+ default: llvm_unreachable("unsupported architecture");
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ case llvm::Triple::x86_64:
+ EntryPoint.append("_DllMainCRTStartup");
+ break;
+ case llvm::Triple::x86:
+ EntryPoint.append("_DllMainCRTStartup@12");
+ break;
+ }
+
+ CmdArgs.push_back("-shared");
+ CmdArgs.push_back("-Bdynamic");
+
+ CmdArgs.push_back("--enable-auto-image-base");
+
+ CmdArgs.push_back("--entry");
+ CmdArgs.push_back(Args.MakeArgString(EntryPoint));
+ } else {
+ EntryPoint.append("mainCRTStartup");
+
+ CmdArgs.push_back(Args.hasArg(options::OPT_static) ? "-Bstatic"
+ : "-Bdynamic");
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ CmdArgs.push_back("--entry");
+ CmdArgs.push_back(Args.MakeArgString(EntryPoint));
+ }
+
+ // FIXME: handle subsystem
+ }
+
+ // NOTE: deal with multiple definitions on Windows (e.g. COMDAT)
+ CmdArgs.push_back("--allow-multiple-definition");
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_rdynamic)) {
+ SmallString<261> ImpLib(Output.getFilename());
+ llvm::sys::path::replace_extension(ImpLib, ".lib");
+
+ CmdArgs.push_back("--out-implib");
+ CmdArgs.push_back(Args.MakeArgString(ImpLib));
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ const std::string CRTPath(D.SysRoot + "/usr/lib/");
+ const char *CRTBegin;
+
+ CRTBegin =
+ Args.hasArg(options::OPT_shared) ? "crtbeginS.obj" : "crtbegin.obj";
+ CmdArgs.push_back(Args.MakeArgString(CRTPath + CRTBegin));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+
+ const auto &Paths = TC.getFilePaths();
+ for (const auto &Path : Paths)
+ CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path));
+
+ AddLinkerInputs(TC, Inputs, Args, CmdArgs);
+
+ if (D.CCCIsCXX() && !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ bool StaticCXX = Args.hasArg(options::OPT_static_libstdcxx) &&
+ !Args.hasArg(options::OPT_static);
+ if (StaticCXX)
+ CmdArgs.push_back("-Bstatic");
+ TC.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (StaticCXX)
+ CmdArgs.push_back("-Bdynamic");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib)) {
+ if (!Args.hasArg(options::OPT_nodefaultlibs)) {
+ // TODO handle /MT[d] /MD[d]
+ CmdArgs.push_back("-lmsvcrt");
+ AddRunTimeLibs(TC, D, CmdArgs, Args);
+ }
+ }
+
+ const std::string Linker = TC.GetProgramPath("ld");
+ Exec = Args.MakeArgString(Linker);
+
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}
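In the CrossWindows linker above, the entry-point symbol is assembled from the architecture's name-mangling convention (32-bit x86 symbols carry a leading underscore) and from whether a DLL is being produced (the DllMainCRTStartup variants are built the same way). A minimal standalone model of the plain-executable case:

#include <string>

// Standalone model of the executable entry-point naming above:
// "_mainCRTStartup" on 32-bit x86, "mainCRTStartup" elsewhere.
static std::string exeEntryPoint(bool IsX86) {
  std::string Entry = IsX86 ? "_" : ""; // x86 C symbols are underscore-prefixed
  Entry += "mainCRTStartup";
  return Entry;
}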
diff --git a/lib/Driver/Tools.h b/lib/Driver/Tools.h
index 4c89676e0e19..6647f39ce817 100644
--- a/lib/Driver/Tools.h
+++ b/lib/Driver/Tools.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_LIB_DRIVER_TOOLS_H_
-#define CLANG_LIB_DRIVER_TOOLS_H_
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLS_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLS_H
#include "clang/Driver/Tool.h"
#include "clang/Driver/Types.h"
@@ -63,6 +63,8 @@ using llvm::opt::ArgStringList;
llvm::opt::ArgStringList &CmdArgs) const;
void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ void AddPPCTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
void AddR600TargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
void AddSparcTargetArgs(const llvm::opt::ArgList &Args,
@@ -88,11 +90,12 @@ using llvm::opt::ArgStringList;
mutable std::unique_ptr<visualstudio::Compile> CLFallback;
public:
- Clang(const ToolChain &TC) : Tool("clang", "clang frontend", TC) {}
+ Clang(const ToolChain &TC) : Tool("clang", "clang frontend", TC, RF_Full) {}
bool hasGoodDiagnostics() const override { return true; }
bool hasIntegratedAssembler() const override { return true; }
bool hasIntegratedCPP() const override { return true; }
+ bool canEmitIR() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
@@ -104,7 +107,8 @@ using llvm::opt::ArgStringList;
class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
public:
ClangAs(const ToolChain &TC) : Tool("clang::as",
- "clang integrated assembler", TC) {}
+ "clang integrated assembler", TC,
+ RF_Full) {}
bool hasGoodDiagnostics() const override { return true; }
bool hasIntegratedAssembler() const override { return false; }
@@ -116,12 +120,22 @@ using llvm::opt::ArgStringList;
const char *LinkingOutput) const override;
};
+ /// \brief Base class for all GNU tools that provide the same behavior when
+  /// it comes to response file support.
+ class GnuTool : public Tool {
+ virtual void anchor();
+
+ public:
+ GnuTool(const char *Name, const char *ShortName, const ToolChain &TC)
+ : Tool(Name, ShortName, TC, RF_Full, llvm::sys::WEM_CurrentCodePage) {}
+ };
+
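The new GnuTool base class centralises the response-file settings: every binutils-style tool now advertises full response-file support encoded in the current code page, while the MSVC tools further down pass RF_Full with UTF-16 and the Darwin linker passes RF_FileList with a -filelist flag. A simplified sketch of the pattern; the enums below are stand-ins for the ones on clang::driver::Tool, not the real declarations.

// Simplified stand-ins for the Tool constructor parameters added in this patch.
enum ResponseFileSupport { RF_None, RF_FileList, RF_Full };
enum WindowsEncodingMethod { WEM_UTF8, WEM_UTF16, WEM_CurrentCodePage };

struct Tool {
  Tool(const char *, const char *, ResponseFileSupport,
       WindowsEncodingMethod = WEM_UTF8) {}
};

// Deriving from GnuTool is enough to opt a tool into @response-file handling
// without repeating the extra constructor arguments in every subclass.
struct GnuTool : Tool {
  GnuTool(const char *Name, const char *ShortName)
      : Tool(Name, ShortName, RF_Full, WEM_CurrentCodePage) {}
};

struct NetBSDLinker : GnuTool {
  NetBSDLinker() : GnuTool("netbsd::Link", "linker") {}
};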
/// gcc - Generic GCC tool implementations.
namespace gcc {
- class LLVM_LIBRARY_VISIBILITY Common : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Common : public GnuTool {
public:
Common(const char *Name, const char *ShortName,
- const ToolChain &TC) : Tool(Name, ShortName, TC) {}
+ const ToolChain &TC) : GnuTool(Name, ShortName, TC) {}
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -176,9 +190,9 @@ namespace gcc {
namespace hexagon {
// For Hexagon, we do not need to instantiate tools for PreProcess, PreCompile and Compile.
// We simply use "clang -cc1" for those actions.
- class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public GnuTool {
public:
- Assemble(const ToolChain &TC) : Tool("hexagon::Assemble",
+ Assemble(const ToolChain &TC) : GnuTool("hexagon::Assemble",
"hexagon-as", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -191,9 +205,9 @@ namespace hexagon {
const char *LinkingOutput) const override;
};
- class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public GnuTool {
public:
- Link(const ToolChain &TC) : Tool("hexagon::Link",
+ Link(const ToolChain &TC) : GnuTool("hexagon::Link",
"hexagon-ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -221,11 +235,16 @@ namespace mips {
const llvm::Triple &Triple, StringRef &CPUName,
StringRef &ABIName);
bool hasMipsAbiArg(const llvm::opt::ArgList &Args, const char *Value);
+ bool isUCLibc(const llvm::opt::ArgList &Args);
bool isNaN2008(const llvm::opt::ArgList &Args, const llvm::Triple &Triple);
bool isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
StringRef ABIName);
}
+namespace ppc {
+ bool hasPPCAbiArg(const llvm::opt::ArgList &Args, const char *Value);
+}
+
namespace darwin {
llvm::Triple::ArchType getArchTypeForMachOArchName(StringRef Str);
void setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str);
@@ -241,8 +260,13 @@ namespace darwin {
}
public:
- MachOTool(const char *Name, const char *ShortName,
- const ToolChain &TC) : Tool(Name, ShortName, TC) {}
+ MachOTool(
+ const char *Name, const char *ShortName, const ToolChain &TC,
+ ResponseFileSupport ResponseSupport = RF_None,
+ llvm::sys::WindowsEncodingMethod ResponseEncoding = llvm::sys::WEM_UTF8,
+ const char *ResponseFlag = "@")
+ : Tool(Name, ShortName, TC, ResponseSupport, ResponseEncoding,
+ ResponseFlag) {}
};
class LLVM_LIBRARY_VISIBILITY Assemble : public MachOTool {
@@ -265,7 +289,9 @@ namespace darwin {
const InputInfoList &Inputs) const;
public:
- Link(const ToolChain &TC) : MachOTool("darwin::Link", "linker", TC) {}
+ Link(const ToolChain &TC) : MachOTool("darwin::Link", "linker", TC,
+ RF_FileList, llvm::sys::WEM_UTF8,
+ "-filelist") {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -320,9 +346,9 @@ namespace darwin {
/// openbsd -- Directly call GNU Binutils assembler and linker
namespace openbsd {
- class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public GnuTool {
public:
- Assemble(const ToolChain &TC) : Tool("openbsd::Assemble", "assembler",
+ Assemble(const ToolChain &TC) : GnuTool("openbsd::Assemble", "assembler",
TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -333,9 +359,9 @@ namespace openbsd {
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
- class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public GnuTool {
public:
- Link(const ToolChain &TC) : Tool("openbsd::Link", "linker", TC) {}
+ Link(const ToolChain &TC) : GnuTool("openbsd::Link", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -349,9 +375,9 @@ namespace openbsd {
/// bitrig -- Directly call GNU Binutils assembler and linker
namespace bitrig {
- class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public GnuTool {
public:
- Assemble(const ToolChain &TC) : Tool("bitrig::Assemble", "assembler",
+ Assemble(const ToolChain &TC) : GnuTool("bitrig::Assemble", "assembler",
TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -361,9 +387,9 @@ namespace bitrig {
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
- class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public GnuTool {
public:
- Link(const ToolChain &TC) : Tool("bitrig::Link", "linker", TC) {}
+ Link(const ToolChain &TC) : GnuTool("bitrig::Link", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -377,9 +403,9 @@ namespace bitrig {
/// freebsd -- Directly call GNU Binutils assembler and linker
namespace freebsd {
- class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public GnuTool {
public:
- Assemble(const ToolChain &TC) : Tool("freebsd::Assemble", "assembler",
+ Assemble(const ToolChain &TC) : GnuTool("freebsd::Assemble", "assembler",
TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -389,9 +415,9 @@ namespace freebsd {
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
- class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public GnuTool {
public:
- Link(const ToolChain &TC) : Tool("freebsd::Link", "linker", TC) {}
+ Link(const ToolChain &TC) : GnuTool("freebsd::Link", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -405,11 +431,11 @@ namespace freebsd {
/// netbsd -- Directly call GNU Binutils assembler and linker
namespace netbsd {
- class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public GnuTool {
public:
Assemble(const ToolChain &TC)
- : Tool("netbsd::Assemble", "assembler", TC) {}
+ : GnuTool("netbsd::Assemble", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -418,11 +444,11 @@ namespace netbsd {
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
- class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public GnuTool {
public:
Link(const ToolChain &TC)
- : Tool("netbsd::Link", "linker", TC) {}
+ : GnuTool("netbsd::Link", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -436,9 +462,9 @@ namespace netbsd {
/// Directly call GNU Binutils' assembler and linker.
namespace gnutools {
- class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public GnuTool {
public:
- Assemble(const ToolChain &TC) : Tool("GNU::Assemble", "assembler", TC) {}
+ Assemble(const ToolChain &TC) : GnuTool("GNU::Assemble", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -448,9 +474,9 @@ namespace gnutools {
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
- class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public GnuTool {
public:
- Link(const ToolChain &TC) : Tool("GNU::Link", "linker", TC) {}
+ Link(const ToolChain &TC) : GnuTool("GNU::Link", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -464,9 +490,9 @@ namespace gnutools {
}
/// minix -- Directly call GNU Binutils assembler and linker
namespace minix {
- class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public GnuTool {
public:
- Assemble(const ToolChain &TC) : Tool("minix::Assemble", "assembler",
+ Assemble(const ToolChain &TC) : GnuTool("minix::Assemble", "assembler",
TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -477,9 +503,9 @@ namespace minix {
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
- class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public GnuTool {
public:
- Link(const ToolChain &TC) : Tool("minix::Link", "linker", TC) {}
+ Link(const ToolChain &TC) : GnuTool("minix::Link", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -520,39 +546,11 @@ namespace solaris {
};
} // end namespace solaris
- /// auroraux -- Directly call GNU Binutils assembler and linker
-namespace auroraux {
- class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
- public:
- Assemble(const ToolChain &TC) : Tool("auroraux::Assemble", "assembler",
- TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
- class LLVM_LIBRARY_VISIBILITY Link : public Tool {
- public:
- Link(const ToolChain &TC) : Tool("auroraux::Link", "linker", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
-} // end namespace auroraux
-
/// dragonfly -- Directly call GNU Binutils assembler and linker
namespace dragonfly {
- class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public GnuTool {
public:
- Assemble(const ToolChain &TC) : Tool("dragonfly::Assemble", "assembler",
+ Assemble(const ToolChain &TC) : GnuTool("dragonfly::Assemble", "assembler",
TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -562,9 +560,9 @@ namespace dragonfly {
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
- class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public GnuTool {
public:
- Link(const ToolChain &TC) : Tool("dragonfly::Link", "linker", TC) {}
+ Link(const ToolChain &TC) : GnuTool("dragonfly::Link", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -581,7 +579,8 @@ namespace dragonfly {
namespace visualstudio {
class LLVM_LIBRARY_VISIBILITY Link : public Tool {
public:
- Link(const ToolChain &TC) : Tool("visualstudio::Link", "linker", TC) {}
+ Link(const ToolChain &TC) : Tool("visualstudio::Link", "linker", TC,
+ RF_Full, llvm::sys::WEM_UTF16) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -594,7 +593,8 @@ namespace visualstudio {
class LLVM_LIBRARY_VISIBILITY Compile : public Tool {
public:
- Compile(const ToolChain &TC) : Tool("visualstudio::Compile", "compiler", TC) {}
+ Compile(const ToolChain &TC) : Tool("visualstudio::Compile", "compiler", TC,
+ RF_Full, llvm::sys::WEM_UTF16) {}
bool hasIntegratedAssembler() const override { return true; }
bool hasIntegratedCPP() const override { return true; }
@@ -605,11 +605,11 @@ namespace visualstudio {
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
- Command *GetCommand(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const;
+ std::unique_ptr<Command> GetCommand(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const;
};
} // end namespace visualstudio
@@ -646,9 +646,35 @@ namespace XCore {
};
} // end namespace XCore.
+namespace CrossWindows {
+class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+public:
+ Assemble(const ToolChain &TC) : Tool("CrossWindows::Assemble", "as", TC) { }
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+public:
+ Link(const ToolChain &TC) : Tool("CrossWindows::Link", "ld", TC, RF_Full) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+}
} // end namespace toolchains
} // end namespace driver
} // end namespace clang
-#endif // CLANG_LIB_DRIVER_TOOLS_H_
+#endif
diff --git a/lib/Driver/Types.cpp b/lib/Driver/Types.cpp
index 3538dbc2c0e9..6ee764c64e34 100644
--- a/lib/Driver/Types.cpp
+++ b/lib/Driver/Types.cpp
@@ -143,6 +143,7 @@ types::ID types::lookupTypeForExtension(const char *Ext) {
.Case("S", TY_Asm)
.Case("o", TY_Object)
.Case("obj", TY_Object)
+ .Case("lib", TY_Object)
.Case("ii", TY_PP_CXX)
.Case("mi", TY_PP_ObjC)
.Case("mm", TY_ObjCXX)
@@ -202,6 +203,7 @@ void types::getCompilationPhases(ID Id, llvm::SmallVectorImpl<phases::ID> &P) {
} else {
if (!onlyAssembleType(Id)) {
P.push_back(phases::Compile);
+ P.push_back(phases::Backend);
}
P.push_back(phases::Assemble);
}
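With phases::Backend inserted after Compile, a source file that is driven all the way to an object now passes through an explicit backend phase. Assuming the surrounding branches still add Preprocess and the trailing Link (only part of getCompilationPhases is visible in this hunk), the phase list for a plain C source looks like the sketch below.

#include <vector>

// Illustrative phase list for a .c file after the change above; Phase is a
// stand-in for clang::driver::phases::ID.
enum Phase { Preprocess, Precompile, Compile, Backend, Assemble, Link };

static std::vector<Phase> phasesForCSource() {
  return {Preprocess, Compile, Backend, Assemble, Link};
}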
diff --git a/lib/Driver/WindowsToolChain.cpp b/lib/Driver/WindowsToolChain.cpp
deleted file mode 100644
index 913425a19f00..000000000000
--- a/lib/Driver/WindowsToolChain.cpp
+++ /dev/null
@@ -1,338 +0,0 @@
-//===--- ToolChains.cpp - ToolChain Implementations -----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ToolChains.h"
-#include "clang/Basic/CharInfo.h"
-#include "clang/Basic/Version.h"
-#include "clang/Driver/Compilation.h"
-#include "clang/Driver/Driver.h"
-#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Driver/Options.h"
-#include "llvm/Config/llvm-config.h"
-#include "llvm/Option/Arg.h"
-#include "llvm/Option/ArgList.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/Path.h"
-
-// Include the necessary headers to interface with the Windows registry and
-// environment.
-#if defined(LLVM_ON_WIN32)
-#define USE_WIN32
-#endif
-
-#ifdef USE_WIN32
- #define WIN32_LEAN_AND_MEAN
- #define NOGDI
- #define NOMINMAX
- #include <windows.h>
-#endif
-
-using namespace clang::driver;
-using namespace clang::driver::toolchains;
-using namespace clang;
-using namespace llvm::opt;
-
-Windows::Windows(const Driver &D, const llvm::Triple& Triple,
- const ArgList &Args)
- : ToolChain(D, Triple, Args) {
-}
-
-Tool *Windows::buildLinker() const {
- return new tools::visualstudio::Link(*this);
-}
-
-Tool *Windows::buildAssembler() const {
- if (getTriple().isOSBinFormatMachO())
- return new tools::darwin::Assemble(*this);
- getDriver().Diag(clang::diag::err_no_external_assembler);
- return nullptr;
-}
-
-bool Windows::IsIntegratedAssemblerDefault() const {
- return true;
-}
-
-bool Windows::IsUnwindTablesDefault() const {
- // FIXME: LLVM's lowering of Win64 data is broken right now. MSVC's linker
- // says that our object files provide invalid .pdata contributions. Until
- // that is fixed, don't ask for unwind tables.
- return false;
- //return getArch() == llvm::Triple::x86_64;
-}
-
-bool Windows::isPICDefault() const {
- return getArch() == llvm::Triple::x86_64;
-}
-
-bool Windows::isPIEDefault() const {
- return false;
-}
-
-bool Windows::isPICDefaultForced() const {
- return getArch() == llvm::Triple::x86_64;
-}
-
-/// \brief Read registry string.
-/// This also supports a means to look for high-versioned keys by use
-/// of a $VERSION placeholder in the key path.
-/// $VERSION in the key path is a placeholder for the version number,
-/// causing the highest value path to be searched for and used.
-/// I.e. "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\$VERSION".
-/// There can be additional characters in the component. Only the numberic
-/// characters are compared.
-static bool getSystemRegistryString(const char *keyPath, const char *valueName,
- char *value, size_t maxLength) {
-#ifndef USE_WIN32
- return false;
-#else
- HKEY hRootKey = NULL;
- HKEY hKey = NULL;
- const char* subKey = NULL;
- DWORD valueType;
- DWORD valueSize = maxLength - 1;
- long lResult;
- bool returnValue = false;
-
- if (strncmp(keyPath, "HKEY_CLASSES_ROOT\\", 18) == 0) {
- hRootKey = HKEY_CLASSES_ROOT;
- subKey = keyPath + 18;
- } else if (strncmp(keyPath, "HKEY_USERS\\", 11) == 0) {
- hRootKey = HKEY_USERS;
- subKey = keyPath + 11;
- } else if (strncmp(keyPath, "HKEY_LOCAL_MACHINE\\", 19) == 0) {
- hRootKey = HKEY_LOCAL_MACHINE;
- subKey = keyPath + 19;
- } else if (strncmp(keyPath, "HKEY_CURRENT_USER\\", 18) == 0) {
- hRootKey = HKEY_CURRENT_USER;
- subKey = keyPath + 18;
- } else {
- return false;
- }
-
- const char *placeHolder = strstr(subKey, "$VERSION");
- char bestName[256];
- bestName[0] = '\0';
- // If we have a $VERSION placeholder, do the highest-version search.
- if (placeHolder) {
- const char *keyEnd = placeHolder - 1;
- const char *nextKey = placeHolder;
- // Find end of previous key.
- while ((keyEnd > subKey) && (*keyEnd != '\\'))
- keyEnd--;
- // Find end of key containing $VERSION.
- while (*nextKey && (*nextKey != '\\'))
- nextKey++;
- size_t partialKeyLength = keyEnd - subKey;
- char partialKey[256];
- if (partialKeyLength > sizeof(partialKey))
- partialKeyLength = sizeof(partialKey);
- strncpy(partialKey, subKey, partialKeyLength);
- partialKey[partialKeyLength] = '\0';
- HKEY hTopKey = NULL;
- lResult = RegOpenKeyEx(hRootKey, partialKey, 0, KEY_READ | KEY_WOW64_32KEY,
- &hTopKey);
- if (lResult == ERROR_SUCCESS) {
- char keyName[256];
- int bestIndex = -1;
- double bestValue = 0.0;
- DWORD index, size = sizeof(keyName) - 1;
- for (index = 0; RegEnumKeyEx(hTopKey, index, keyName, &size, NULL,
- NULL, NULL, NULL) == ERROR_SUCCESS; index++) {
- const char *sp = keyName;
- while (*sp && !isDigit(*sp))
- sp++;
- if (!*sp)
- continue;
- const char *ep = sp + 1;
- while (*ep && (isDigit(*ep) || (*ep == '.')))
- ep++;
- char numBuf[32];
- strncpy(numBuf, sp, sizeof(numBuf) - 1);
- numBuf[sizeof(numBuf) - 1] = '\0';
- double dvalue = strtod(numBuf, NULL);
- if (dvalue > bestValue) {
- // Test that InstallDir is indeed there before keeping this index.
- // Open the chosen key path remainder.
- strcpy(bestName, keyName);
- // Append rest of key.
- strncat(bestName, nextKey, sizeof(bestName) - 1);
- bestName[sizeof(bestName) - 1] = '\0';
- lResult = RegOpenKeyEx(hTopKey, bestName, 0,
- KEY_READ | KEY_WOW64_32KEY, &hKey);
- if (lResult == ERROR_SUCCESS) {
- lResult = RegQueryValueEx(hKey, valueName, NULL, &valueType,
- (LPBYTE)value, &valueSize);
- if (lResult == ERROR_SUCCESS) {
- bestIndex = (int)index;
- bestValue = dvalue;
- returnValue = true;
- }
- RegCloseKey(hKey);
- }
- }
- size = sizeof(keyName) - 1;
- }
- RegCloseKey(hTopKey);
- }
- } else {
- lResult = RegOpenKeyEx(hRootKey, subKey, 0, KEY_READ | KEY_WOW64_32KEY,
- &hKey);
- if (lResult == ERROR_SUCCESS) {
- lResult = RegQueryValueEx(hKey, valueName, NULL, &valueType,
- (LPBYTE)value, &valueSize);
- if (lResult == ERROR_SUCCESS)
- returnValue = true;
- RegCloseKey(hKey);
- }
- }
- return returnValue;
-#endif // USE_WIN32
-}
-
-/// \brief Get Windows SDK installation directory.
-static bool getWindowsSDKDir(std::string &path) {
- char windowsSDKInstallDir[256];
- // Try the Windows registry.
- bool hasSDKDir = getSystemRegistryString(
- "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
- "InstallationFolder",
- windowsSDKInstallDir,
- sizeof(windowsSDKInstallDir) - 1);
- // If we have both vc80 and vc90, pick version we were compiled with.
- if (hasSDKDir && windowsSDKInstallDir[0]) {
- path = windowsSDKInstallDir;
- return true;
- }
- return false;
-}
-
-// Get Visual Studio installation directory.
-static bool getVisualStudioDir(std::string &path) {
- // First check the environment variables that vsvars32.bat sets.
- const char* vcinstalldir = getenv("VCINSTALLDIR");
- if (vcinstalldir) {
- char *p = const_cast<char *>(strstr(vcinstalldir, "\\VC"));
- if (p)
- *p = '\0';
- path = vcinstalldir;
- return true;
- }
-
- char vsIDEInstallDir[256];
- char vsExpressIDEInstallDir[256];
- // Then try the windows registry.
- bool hasVCDir = getSystemRegistryString(
- "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\$VERSION",
- "InstallDir", vsIDEInstallDir, sizeof(vsIDEInstallDir) - 1);
- bool hasVCExpressDir = getSystemRegistryString(
- "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VCExpress\\$VERSION",
- "InstallDir", vsExpressIDEInstallDir, sizeof(vsExpressIDEInstallDir) - 1);
- // If we have both vc80 and vc90, pick version we were compiled with.
- if (hasVCDir && vsIDEInstallDir[0]) {
- char *p = (char*)strstr(vsIDEInstallDir, "\\Common7\\IDE");
- if (p)
- *p = '\0';
- path = vsIDEInstallDir;
- return true;
- }
-
- if (hasVCExpressDir && vsExpressIDEInstallDir[0]) {
- char *p = (char*)strstr(vsExpressIDEInstallDir, "\\Common7\\IDE");
- if (p)
- *p = '\0';
- path = vsExpressIDEInstallDir;
- return true;
- }
-
- // Try the environment.
- const char *vs100comntools = getenv("VS100COMNTOOLS");
- const char *vs90comntools = getenv("VS90COMNTOOLS");
- const char *vs80comntools = getenv("VS80COMNTOOLS");
-
- const char *vscomntools = nullptr;
-
- // Find any version we can
- if (vs100comntools)
- vscomntools = vs100comntools;
- else if (vs90comntools)
- vscomntools = vs90comntools;
- else if (vs80comntools)
- vscomntools = vs80comntools;
-
- if (vscomntools && *vscomntools) {
- const char *p = strstr(vscomntools, "\\Common7\\Tools");
- path = p ? std::string(vscomntools, p) : vscomntools;
- return true;
- }
- return false;
-}
-
-void Windows::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdinc))
- return;
-
- if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
- SmallString<128> P(getDriver().ResourceDir);
- llvm::sys::path::append(P, "include");
- addSystemInclude(DriverArgs, CC1Args, P.str());
- }
-
- if (DriverArgs.hasArg(options::OPT_nostdlibinc))
- return;
-
- // Honor %INCLUDE%. It should know essential search paths with vcvarsall.bat.
- if (const char *cl_include_dir = getenv("INCLUDE")) {
- SmallVector<StringRef, 8> Dirs;
- StringRef(cl_include_dir)
- .split(Dirs, ";", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
- for (StringRef Dir : Dirs)
- addSystemInclude(DriverArgs, CC1Args, Dir);
- if (!Dirs.empty())
- return;
- }
-
- std::string VSDir;
- std::string WindowsSDKDir;
-
- // When built with access to the proper Windows APIs, try to actually find
- // the correct include paths first.
- if (getVisualStudioDir(VSDir)) {
- SmallString<128> P;
- P = VSDir;
- llvm::sys::path::append(P, "VC\\include");
- addSystemInclude(DriverArgs, CC1Args, P.str());
- if (getWindowsSDKDir(WindowsSDKDir)) {
- P = WindowsSDKDir;
- llvm::sys::path::append(P, "include");
- addSystemInclude(DriverArgs, CC1Args, P.str());
- } else {
- P = VSDir;
- llvm::sys::path::append(P, "VC\\PlatformSDK\\Include");
- addSystemInclude(DriverArgs, CC1Args, P.str());
- }
- return;
- }
-
- // As a fallback, select default install paths.
- // FIXME: Don't guess drives and paths like this on Windows.
- const StringRef Paths[] = {
- "C:/Program Files/Microsoft Visual Studio 10.0/VC/include",
- "C:/Program Files/Microsoft Visual Studio 9.0/VC/include",
- "C:/Program Files/Microsoft Visual Studio 9.0/VC/PlatformSDK/Include",
- "C:/Program Files/Microsoft Visual Studio 8/VC/include",
- "C:/Program Files/Microsoft Visual Studio 8/VC/PlatformSDK/Include"
- };
- addSystemIncludes(DriverArgs, CC1Args, Paths);
-}
-
-void Windows::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- // FIXME: There should probably be logic here to find libc++ on Windows.
-}
diff --git a/lib/Edit/EditedSource.cpp b/lib/Edit/EditedSource.cpp
index 6cf6335da181..1c66cb827706 100644
--- a/lib/Edit/EditedSource.cpp
+++ b/lib/Edit/EditedSource.cpp
@@ -280,6 +280,12 @@ static void adjustRemoval(const SourceManager &SM, const LangOptions &LangOpts,
unsigned begin = offs.getOffset();
unsigned end = begin + len;
+ // Do not try to extend the removal if we're at the end of the buffer already.
+ if (end == buffer.size())
+ return;
+
+ assert(begin < buffer.size() && end < buffer.size() && "Invalid range!");
+
// FIXME: Remove newline.
if (begin == 0) {
diff --git a/lib/Edit/RewriteObjCFoundationAPI.cpp b/lib/Edit/RewriteObjCFoundationAPI.cpp
index 666844c65ffc..9f71168de8fc 100644
--- a/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -1149,7 +1149,8 @@ static bool rewriteToStringBoxedExpression(const ObjCMessageExpr *Msg,
Selector Sel = Msg->getSelector();
if (Sel == NS.getNSStringSelector(NSAPI::NSStr_stringWithUTF8String) ||
- Sel == NS.getNSStringSelector(NSAPI::NSStr_stringWithCString)) {
+ Sel == NS.getNSStringSelector(NSAPI::NSStr_stringWithCString) ||
+ Sel == NS.getNSStringSelector(NSAPI::NSStr_initWithUTF8String)) {
if (Msg->getNumArgs() != 1)
return false;
return doRewriteToUTF8StringBoxedExpressionHelper(Msg, NS, commit);
diff --git a/lib/Format/BreakableToken.cpp b/lib/Format/BreakableToken.cpp
index 1bea0e565131..26f1371b4092 100644
--- a/lib/Format/BreakableToken.cpp
+++ b/lib/Format/BreakableToken.cpp
@@ -303,7 +303,8 @@ BreakableBlockComment::BreakableBlockComment(
StartOfLineColumn[i] += Decoration.size();
Lines[i] = Lines[i].substr(Decoration.size());
LeadingWhitespace[i] += Decoration.size();
- IndentAtLineBreak = std::min<int>(IndentAtLineBreak, StartOfLineColumn[i]);
+ IndentAtLineBreak =
+ std::min<int>(IndentAtLineBreak, std::max(0, StartOfLineColumn[i]));
}
IndentAtLineBreak = std::max<unsigned>(IndentAtLineBreak, Decoration.size());
DEBUG({
diff --git a/lib/Format/BreakableToken.h b/lib/Format/BreakableToken.h
index 72bb1e4404ff..eb1f9fda3071 100644
--- a/lib/Format/BreakableToken.h
+++ b/lib/Format/BreakableToken.h
@@ -14,8 +14,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FORMAT_BREAKABLETOKEN_H
-#define LLVM_CLANG_FORMAT_BREAKABLETOKEN_H
+#ifndef LLVM_CLANG_LIB_FORMAT_BREAKABLETOKEN_H
+#define LLVM_CLANG_LIB_FORMAT_BREAKABLETOKEN_H
#include "Encoding.h"
#include "TokenAnnotator.h"
@@ -212,6 +212,11 @@ private:
// StartOfLineColumn[i] is the target column at which Line[i] should be.
// Note that this excludes a leading "* " or "*" in case all lines have
// a "*" prefix.
+ // The first line's target column is always positive. The remaining lines'
+ // target columns are relative to the first line to allow correct indentation
+ // of comments in \c WhitespaceManager. Thus they can be negative as well (in
+ // case the first line needs to be unindented more than there's actual
+ // whitespace in another line).
SmallVector<int, 16> StartOfLineColumn;
// The column at which the text of a broken line should start.
@@ -237,4 +242,4 @@ private:
} // namespace format
} // namespace clang
-#endif // LLVM_CLANG_FORMAT_BREAKABLETOKEN_H
+#endif
diff --git a/lib/Format/CMakeLists.txt b/lib/Format/CMakeLists.txt
index 47e15bd08a3f..2ce38343cfe9 100644
--- a/lib/Format/CMakeLists.txt
+++ b/lib/Format/CMakeLists.txt
@@ -6,11 +6,12 @@ add_clang_library(clangFormat
Format.cpp
FormatToken.cpp
TokenAnnotator.cpp
+ UnwrappedLineFormatter.cpp
UnwrappedLineParser.cpp
WhitespaceManager.cpp
LINK_LIBS
clangBasic
clangLex
- clangTooling
+ clangToolingCore
)
diff --git a/lib/Format/ContinuationIndenter.cpp b/lib/Format/ContinuationIndenter.cpp
index 014c30e346ad..4cc92b02a9e5 100644
--- a/lib/Format/ContinuationIndenter.cpp
+++ b/lib/Format/ContinuationIndenter.cpp
@@ -48,21 +48,22 @@ static bool startsSegmentOfBuilderTypeCall(const FormatToken &Tok) {
static bool startsNextParameter(const FormatToken &Current,
const FormatStyle &Style) {
const FormatToken &Previous = *Current.Previous;
- if (Current.Type == TT_CtorInitializerComma &&
+ if (Current.is(TT_CtorInitializerComma) &&
Style.BreakConstructorInitializersBeforeComma)
return true;
return Previous.is(tok::comma) && !Current.isTrailingComment() &&
- (Previous.Type != TT_CtorInitializerComma ||
+ (Previous.isNot(TT_CtorInitializerComma) ||
!Style.BreakConstructorInitializersBeforeComma);
}
ContinuationIndenter::ContinuationIndenter(const FormatStyle &Style,
+ const AdditionalKeywords &Keywords,
SourceManager &SourceMgr,
WhitespaceManager &Whitespaces,
encoding::Encoding Encoding,
bool BinPackInconclusiveFunctions)
- : Style(Style), SourceMgr(SourceMgr), Whitespaces(Whitespaces),
- Encoding(Encoding),
+ : Style(Style), Keywords(Keywords), SourceMgr(SourceMgr),
+ Whitespaces(Whitespaces), Encoding(Encoding),
BinPackInconclusiveFunctions(BinPackInconclusiveFunctions),
CommentPragmasRegex(Style.CommentPragmas) {}
@@ -92,13 +93,14 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
const FormatToken &Current = *State.NextToken;
const FormatToken &Previous = *Current.Previous;
assert(&Previous == Current.Previous);
- if (!Current.CanBreakBefore && !(State.Stack.back().BreakBeforeClosingBrace &&
- Current.closesBlockTypeList(Style)))
+ if (!Current.CanBreakBefore &&
+ !(State.Stack.back().BreakBeforeClosingBrace &&
+ Current.closesBlockTypeList(Style)))
return false;
// The opening "{" of a braced list has to be on the same line as the first
// element if it is nested in another braced init list or function call.
if (!Current.MustBreakBefore && Previous.is(tok::l_brace) &&
- Previous.Type != TT_DictLiteral && Previous.BlockKind == BK_BracedInit &&
+ Previous.isNot(TT_DictLiteral) && Previous.BlockKind == BK_BracedInit &&
Previous.Previous &&
Previous.Previous->isOneOf(tok::l_brace, tok::l_paren, tok::comma))
return false;
@@ -116,19 +118,24 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
// Don't create a 'hanging' indent if there are multiple blocks in a single
// statement.
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Previous.is(tok::l_brace) && State.Stack.size() > 1 &&
- State.Stack[State.Stack.size() - 2].JSFunctionInlined &&
+ if (Previous.is(tok::l_brace) && State.Stack.size() > 1 &&
+ State.Stack[State.Stack.size() - 2].NestedBlockInlined &&
State.Stack[State.Stack.size() - 2].HasMultipleNestedBlocks)
return false;
+ // Don't break after very short return types (e.g. "void") as that is often
+ // unexpected.
+ if (Current.is(TT_FunctionDeclarationName) &&
+ !Style.AlwaysBreakAfterDefinitionReturnType && State.Column < 6)
+ return false;
+
return !State.Stack.back().NoLineBreak;
}
bool ContinuationIndenter::mustBreak(const LineState &State) {
const FormatToken &Current = *State.NextToken;
const FormatToken &Previous = *Current.Previous;
- if (Current.MustBreakBefore || Current.Type == TT_InlineASMColon)
+ if (Current.MustBreakBefore || Current.is(TT_InlineASMColon))
return true;
if (State.Stack.back().BreakBeforeClosingBrace &&
Current.closesBlockTypeList(Style))
@@ -137,32 +144,32 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
return true;
if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
(Style.BreakBeforeTernaryOperators &&
- (Current.is(tok::question) || (Current.Type == TT_ConditionalExpr &&
- Previous.isNot(tok::question)))) ||
+ (Current.is(tok::question) ||
+ (Current.is(TT_ConditionalExpr) && Previous.isNot(tok::question)))) ||
(!Style.BreakBeforeTernaryOperators &&
- (Previous.is(tok::question) || Previous.Type == TT_ConditionalExpr))) &&
+ (Previous.is(tok::question) || Previous.is(TT_ConditionalExpr)))) &&
State.Stack.back().BreakBeforeParameter && !Current.isTrailingComment() &&
!Current.isOneOf(tok::r_paren, tok::r_brace))
return true;
if (Style.AlwaysBreakBeforeMultilineStrings &&
State.Column > State.Stack.back().Indent && // Breaking saves columns.
!Previous.isOneOf(tok::kw_return, tok::lessless, tok::at) &&
- Previous.Type != TT_InlineASMColon &&
- Previous.Type != TT_ConditionalExpr && nextIsMultilineString(State))
+ !Previous.isOneOf(TT_InlineASMColon, TT_ConditionalExpr) &&
+ nextIsMultilineString(State))
return true;
- if (((Previous.Type == TT_DictLiteral && Previous.is(tok::l_brace)) ||
- Previous.Type == TT_ArrayInitializerLSquare) &&
+ if (((Previous.is(TT_DictLiteral) && Previous.is(tok::l_brace)) ||
+ Previous.is(TT_ArrayInitializerLSquare)) &&
Style.ColumnLimit > 0 &&
getLengthToMatchingParen(Previous) + State.Column > getColumnLimit(State))
return true;
- if (Current.Type == TT_CtorInitializerColon &&
+ if (Current.is(TT_CtorInitializerColon) &&
((Style.AllowShortFunctionsOnASingleLine != FormatStyle::SFS_All) ||
Style.BreakConstructorInitializersBeforeComma || Style.ColumnLimit != 0))
return true;
if (State.Column < getNewLineColumn(State))
return false;
- if (!Style.BreakBeforeBinaryOperators) {
+ if (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None) {
// If we need to break somewhere inside the LHS of a binary expression, we
// should also break after the operator. Otherwise, the formatting would
// hide the operator precedence, e.g. in:
@@ -172,41 +179,43 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
// expression itself as otherwise, the line breaks seem superfluous.
// We need special cases for ">>" which we have split into two ">" while
// lexing in order to make template parsing easier.
- //
- // FIXME: We'll need something similar for styles that break before binary
- // operators.
bool IsComparison = (Previous.getPrecedence() == prec::Relational ||
Previous.getPrecedence() == prec::Equality) &&
Previous.Previous &&
- Previous.Previous->Type != TT_BinaryOperator; // For >>.
+ Previous.Previous->isNot(TT_BinaryOperator); // For >>.
bool LHSIsBinaryExpr =
Previous.Previous && Previous.Previous->EndsBinaryExpression;
- if (Previous.Type == TT_BinaryOperator &&
- (!IsComparison || LHSIsBinaryExpr) &&
- Current.Type != TT_BinaryOperator && // For >>.
+ if (Previous.is(TT_BinaryOperator) && (!IsComparison || LHSIsBinaryExpr) &&
+ Current.isNot(TT_BinaryOperator) && // For >>.
!Current.isTrailingComment() && !Previous.is(tok::lessless) &&
Previous.getPrecedence() != prec::Assignment &&
State.Stack.back().BreakBeforeParameter)
return true;
+ } else {
+ if (Current.is(TT_BinaryOperator) && Previous.EndsBinaryExpression &&
+ State.Stack.back().BreakBeforeParameter)
+ return true;
}
// Same as above, but for the first "<<" operator.
- if (Current.is(tok::lessless) && Current.Type != TT_OverloadedOperator &&
+ if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator) &&
State.Stack.back().BreakBeforeParameter &&
State.Stack.back().FirstLessLess == 0)
return true;
- if (Current.Type == TT_SelectorName &&
- State.Stack.back().ObjCSelectorNameFound &&
+ if (Current.is(TT_SelectorName) && State.Stack.back().ObjCSelectorNameFound &&
State.Stack.back().BreakBeforeParameter)
return true;
- if (Previous.ClosesTemplateDeclaration && Current.NestingLevel == 0 &&
- !Current.isTrailingComment())
- return true;
+ if (Current.NestingLevel == 0 && !Current.isTrailingComment()) {
+ if (Previous.ClosesTemplateDeclaration)
+ return true;
+ if (Previous.is(TT_LeadingJavaAnnotation) && Current.isNot(tok::l_paren) &&
+ Current.isNot(TT_LeadingJavaAnnotation))
+ return true;
+ }
// If the return type spans multiple lines, wrap before the function name.
- if ((Current.Type == TT_FunctionDeclarationName ||
- Current.is(tok::kw_operator)) &&
+ if (Current.isOneOf(TT_FunctionDeclarationName, tok::kw_operator) &&
State.Stack.back().BreakBeforeParameter)
return true;
@@ -232,7 +241,7 @@ unsigned ContinuationIndenter::addTokenToState(LineState &State, bool Newline,
const FormatToken &Current = *State.NextToken;
assert(!State.Stack.empty());
- if ((Current.Type == TT_ImplicitStringLiteral &&
+ if ((Current.is(TT_ImplicitStringLiteral) &&
(Current.Previous->Tok.getIdentifierInfo() == nullptr ||
Current.Previous->Tok.getIdentifierInfo()->getPPKeywordID() ==
tok::pp_not_keyword))) {
@@ -281,7 +290,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
Whitespaces.replaceWhitespace(Current, /*Newlines=*/0, /*IndentLevel=*/0,
Spaces, State.Column + Spaces);
- if (Current.Type == TT_SelectorName &&
+ if (Current.is(TT_SelectorName) &&
!State.Stack.back().ObjCSelectorNameFound) {
if (Current.LongestObjCSelectorName == 0)
State.Stack.back().AlignColons = false;
@@ -293,36 +302,50 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
State.Stack.back().ColonPos = State.Column + Spaces + Current.ColumnWidth;
}
- if (Previous.opensScope() && Previous.Type != TT_ObjCMethodExpr &&
- (Current.Type != TT_LineComment || Previous.BlockKind == BK_BracedInit))
+ if (Style.AlignAfterOpenBracket && Previous.opensScope() &&
+ Previous.isNot(TT_ObjCMethodExpr) &&
+ (Current.isNot(TT_LineComment) || Previous.BlockKind == BK_BracedInit))
State.Stack.back().Indent = State.Column + Spaces;
if (State.Stack.back().AvoidBinPacking && startsNextParameter(Current, Style))
State.Stack.back().NoLineBreak = true;
if (startsSegmentOfBuilderTypeCall(Current))
State.Stack.back().ContainsUnwrappedBuilder = true;
+ if (Current.isMemberAccess() && Previous.is(tok::r_paren) &&
+ (Previous.MatchingParen &&
+ (Previous.TotalLength - Previous.MatchingParen->TotalLength > 10))) {
+ // If there is a function call with long parameters, break before trailing
+ // calls. This prevents things like:
+ // EXPECT_CALL(SomeLongParameter).Times(
+ // 2);
+ // We don't want to do this for short parameters as they can just be
+ // indexes.
+ State.Stack.back().NoLineBreak = true;
+ }
+
State.Column += Spaces;
if (Current.isNot(tok::comment) && Previous.is(tok::l_paren) &&
- Previous.Previous && Previous.Previous->isOneOf(tok::kw_if, tok::kw_for))
+ Previous.Previous &&
+ Previous.Previous->isOneOf(tok::kw_if, tok::kw_for)) {
// Treat the condition inside an if as if it was a second function
// parameter, i.e. let nested calls have a continuation indent.
State.Stack.back().LastSpace = State.Column;
- else if (!Current.isOneOf(tok::comment, tok::caret) &&
- (Previous.is(tok::comma) ||
- (Previous.is(tok::colon) && Previous.Type == TT_ObjCMethodExpr)))
+ State.Stack.back().NestedBlockIndent = State.Column;
+ } else if (!Current.isOneOf(tok::comment, tok::caret) &&
+ (Previous.is(tok::comma) ||
+ (Previous.is(tok::colon) && Previous.is(TT_ObjCMethodExpr)))) {
State.Stack.back().LastSpace = State.Column;
- else if ((Previous.Type == TT_BinaryOperator ||
- Previous.Type == TT_ConditionalExpr ||
- Previous.Type == TT_CtorInitializerColon) &&
- ((Previous.getPrecedence() != prec::Assignment &&
- (Previous.isNot(tok::lessless) || Previous.OperatorIndex != 0 ||
- !Previous.LastOperator)) ||
- Current.StartsBinaryExpression))
+ } else if ((Previous.isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
+ TT_CtorInitializerColon)) &&
+ ((Previous.getPrecedence() != prec::Assignment &&
+ (Previous.isNot(tok::lessless) || Previous.OperatorIndex != 0 ||
+ !Previous.LastOperator)) ||
+ Current.StartsBinaryExpression)) {
// Always indent relative to the RHS of the expression unless this is a
// simple assignment without binary expression on the RHS. Also indent
// relative to unary operators and the colons of constructor initializers.
State.Stack.back().LastSpace = State.Column;
- else if (Previous.Type == TT_InheritanceColon) {
+ } else if (Previous.is(TT_InheritanceColon)) {
State.Stack.back().Indent = State.Column;
State.Stack.back().LastSpace = State.Column;
} else if (Previous.opensScope()) {
@@ -373,10 +396,11 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
Penalty += Style.PenaltyBreakFirstLessLess;
State.Column = getNewLineColumn(State);
+ State.Stack.back().NestedBlockIndent = State.Column;
if (NextNonComment->isMemberAccess()) {
if (State.Stack.back().CallContinuation == 0)
State.Stack.back().CallContinuation = State.Column;
- } else if (NextNonComment->Type == TT_SelectorName) {
+ } else if (NextNonComment->is(TT_SelectorName)) {
if (!State.Stack.back().ObjCSelectorNameFound) {
if (NextNonComment->LongestObjCSelectorName == 0) {
State.Stack.back().AlignColons = false;
@@ -389,8 +413,7 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
State.Stack.back().ColonPos = State.Column + NextNonComment->ColumnWidth;
}
} else if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
- (PreviousNonComment->Type == TT_ObjCMethodExpr ||
- PreviousNonComment->Type == TT_DictLiteral)) {
+ PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral)) {
// FIXME: This is hacky, find a better way. The problem is that in an ObjC
// method expression, the block should be aligned to the line starting it,
// e.g.:
@@ -408,9 +431,10 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
if ((Previous.isOneOf(tok::comma, tok::semi) &&
!State.Stack.back().AvoidBinPacking) ||
- Previous.Type == TT_BinaryOperator)
+ Previous.is(TT_BinaryOperator))
State.Stack.back().BreakBeforeParameter = false;
- if (Previous.Type == TT_TemplateCloser && Current.NestingLevel == 0)
+ if (Previous.isOneOf(TT_TemplateCloser, TT_JavaAnnotation) &&
+ Current.NestingLevel == 0)
State.Stack.back().BreakBeforeParameter = false;
if (NextNonComment->is(tok::question) ||
(PreviousNonComment && PreviousNonComment->is(tok::question)))
@@ -431,11 +455,10 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// Any break on this level means that the parent level has been broken
// and we need to avoid bin packing there.
- bool JavaScriptFormat = Style.Language == FormatStyle::LK_JavaScript &&
- Current.is(tok::r_brace) &&
- State.Stack.size() > 1 &&
- State.Stack[State.Stack.size() - 2].JSFunctionInlined;
- if (!JavaScriptFormat) {
+ bool NestedBlockSpecialCase =
+ Current.is(tok::r_brace) && State.Stack.size() > 1 &&
+ State.Stack[State.Stack.size() - 2].NestedBlockInlined;
+ if (!NestedBlockSpecialCase) {
for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i) {
State.Stack[i].BreakBeforeParameter = true;
}
@@ -443,27 +466,27 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
if (PreviousNonComment &&
!PreviousNonComment->isOneOf(tok::comma, tok::semi) &&
- PreviousNonComment->Type != TT_TemplateCloser &&
- PreviousNonComment->Type != TT_BinaryOperator &&
- Current.Type != TT_BinaryOperator && !PreviousNonComment->opensScope())
+ (PreviousNonComment->isNot(TT_TemplateCloser) ||
+ Current.NestingLevel != 0) &&
+ !PreviousNonComment->isOneOf(TT_BinaryOperator, TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation) &&
+ Current.isNot(TT_BinaryOperator) && !PreviousNonComment->opensScope())
State.Stack.back().BreakBeforeParameter = true;
// If we break after { or the [ of an array initializer, we should also break
// before the corresponding } or ].
if (PreviousNonComment &&
- (PreviousNonComment->is(tok::l_brace) ||
- PreviousNonComment->Type == TT_ArrayInitializerLSquare))
+ (PreviousNonComment->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)))
State.Stack.back().BreakBeforeClosingBrace = true;
if (State.Stack.back().AvoidBinPacking) {
// If we are breaking after '(', '{', '<', this is not bin packing
// unless AllowAllParametersOfDeclarationOnNextLine is false or this is a
// dict/object literal.
- if (!(Previous.isOneOf(tok::l_paren, tok::l_brace) ||
- Previous.Type == TT_BinaryOperator) ||
+ if (!Previous.isOneOf(tok::l_paren, tok::l_brace, TT_BinaryOperator) ||
(!Style.AllowAllParametersOfDeclarationOnNextLine &&
State.Line->MustBeDeclaration) ||
- Previous.Type == TT_DictLiteral)
+ Previous.is(TT_DictLiteral))
State.Stack.back().BreakBeforeParameter = true;
}
@@ -474,7 +497,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
if (!State.NextToken || !State.NextToken->Previous)
return 0;
FormatToken &Current = *State.NextToken;
- const FormatToken &Previous = *State.NextToken->Previous;
+ const FormatToken &Previous = *Current.Previous;
// If we are continuing an expression, we want to use the continuation indent.
unsigned ContinuationIndent =
std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
@@ -483,22 +506,26 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
const FormatToken *NextNonComment = Previous.getNextNonComment();
if (!NextNonComment)
NextNonComment = &Current;
+
+ // Java specific bits.
+ if (Style.Language == FormatStyle::LK_Java &&
+ Current.isOneOf(Keywords.kw_implements, Keywords.kw_extends))
+ return std::max(State.Stack.back().LastSpace,
+ State.Stack.back().Indent + Style.ContinuationIndentWidth);
+
if (NextNonComment->is(tok::l_brace) && NextNonComment->BlockKind == BK_Block)
return Current.NestingLevel == 0 ? State.FirstIndent
: State.Stack.back().Indent;
if (Current.isOneOf(tok::r_brace, tok::r_square)) {
- if (State.Stack.size() > 1 &&
- State.Stack[State.Stack.size() - 2].JSFunctionInlined)
- return State.FirstIndent;
- if (Current.closesBlockTypeList(Style) ||
- (Current.MatchingParen &&
- Current.MatchingParen->BlockKind == BK_BracedInit))
+ if (Current.closesBlockTypeList(Style))
+ return State.Stack[State.Stack.size() - 2].NestedBlockIndent;
+ if (Current.MatchingParen &&
+ Current.MatchingParen->BlockKind == BK_BracedInit)
return State.Stack[State.Stack.size() - 2].LastSpace;
- else
- return State.FirstIndent;
+ return State.FirstIndent;
}
if (Current.is(tok::identifier) && Current.Next &&
- Current.Next->Type == TT_DictLiteral)
+ Current.Next->is(TT_DictLiteral))
return State.Stack.back().Indent;
if (NextNonComment->isStringLiteral() && State.StartOfStringLiteral != 0)
return State.StartOfStringLiteral;
@@ -506,60 +533,57 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
State.Stack.back().FirstLessLess != 0)
return State.Stack.back().FirstLessLess;
if (NextNonComment->isMemberAccess()) {
- if (State.Stack.back().CallContinuation == 0) {
+ if (State.Stack.back().CallContinuation == 0)
return ContinuationIndent;
- } else {
- return State.Stack.back().CallContinuation;
- }
+ return State.Stack.back().CallContinuation;
}
if (State.Stack.back().QuestionColumn != 0 &&
((NextNonComment->is(tok::colon) &&
- NextNonComment->Type == TT_ConditionalExpr) ||
- Previous.Type == TT_ConditionalExpr))
+ NextNonComment->is(TT_ConditionalExpr)) ||
+ Previous.is(TT_ConditionalExpr)))
return State.Stack.back().QuestionColumn;
if (Previous.is(tok::comma) && State.Stack.back().VariablePos != 0)
return State.Stack.back().VariablePos;
- if ((PreviousNonComment && (PreviousNonComment->ClosesTemplateDeclaration ||
- PreviousNonComment->Type == TT_AttributeParen)) ||
+ if ((PreviousNonComment &&
+ (PreviousNonComment->ClosesTemplateDeclaration ||
+ PreviousNonComment->isOneOf(TT_AttributeParen, TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation))) ||
(!Style.IndentWrappedFunctionNames &&
- (NextNonComment->is(tok::kw_operator) ||
- NextNonComment->Type == TT_FunctionDeclarationName)))
+ NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName)))
return std::max(State.Stack.back().LastSpace, State.Stack.back().Indent);
- if (NextNonComment->Type == TT_SelectorName) {
+ if (NextNonComment->is(TT_SelectorName)) {
if (!State.Stack.back().ObjCSelectorNameFound) {
- if (NextNonComment->LongestObjCSelectorName == 0) {
+ if (NextNonComment->LongestObjCSelectorName == 0)
return State.Stack.back().Indent;
- } else {
- return State.Stack.back().Indent +
- NextNonComment->LongestObjCSelectorName -
- NextNonComment->ColumnWidth;
- }
- } else if (!State.Stack.back().AlignColons) {
+ return State.Stack.back().Indent +
+ NextNonComment->LongestObjCSelectorName -
+ NextNonComment->ColumnWidth;
+ }
+ if (!State.Stack.back().AlignColons)
return State.Stack.back().Indent;
- } else if (State.Stack.back().ColonPos > NextNonComment->ColumnWidth) {
+ if (State.Stack.back().ColonPos > NextNonComment->ColumnWidth)
return State.Stack.back().ColonPos - NextNonComment->ColumnWidth;
- } else {
- return State.Stack.back().Indent;
- }
+ return State.Stack.back().Indent;
}
- if (NextNonComment->Type == TT_ArraySubscriptLSquare) {
+ if (NextNonComment->is(TT_ArraySubscriptLSquare)) {
if (State.Stack.back().StartOfArraySubscripts != 0)
return State.Stack.back().StartOfArraySubscripts;
- else
- return ContinuationIndent;
+ return ContinuationIndent;
}
- if (NextNonComment->Type == TT_StartOfName ||
+ if (NextNonComment->is(TT_StartOfName) ||
Previous.isOneOf(tok::coloncolon, tok::equal)) {
return ContinuationIndent;
}
if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
- (PreviousNonComment->Type == TT_ObjCMethodExpr ||
- PreviousNonComment->Type == TT_DictLiteral))
+ PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral))
return ContinuationIndent;
- if (NextNonComment->Type == TT_CtorInitializerColon)
+ if (NextNonComment->is(TT_CtorInitializerColon))
return State.FirstIndent + Style.ConstructorInitializerIndentWidth;
- if (NextNonComment->Type == TT_CtorInitializerComma)
+ if (NextNonComment->is(TT_CtorInitializerComma))
return State.Stack.back().Indent;
+ if (Previous.is(tok::r_paren) && !Current.isBinaryOperator() &&
+ !Current.isOneOf(tok::colon, tok::comment))
+ return ContinuationIndent;
if (State.Stack.back().Indent == State.FirstIndent && PreviousNonComment &&
PreviousNonComment->isNot(tok::r_brace))
// Ensure that we fall back to the continuation indent width instead of
@@ -573,18 +597,18 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
assert(State.Stack.size());
const FormatToken &Current = *State.NextToken;
- if (Current.Type == TT_InheritanceColon)
+ if (Current.is(TT_InheritanceColon))
State.Stack.back().AvoidBinPacking = true;
- if (Current.is(tok::lessless) && Current.Type != TT_OverloadedOperator) {
+ if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator)) {
if (State.Stack.back().FirstLessLess == 0)
State.Stack.back().FirstLessLess = State.Column;
else
State.Stack.back().LastOperatorWrapped = Newline;
}
- if ((Current.Type == TT_BinaryOperator && Current.isNot(tok::lessless)) ||
- Current.Type == TT_ConditionalExpr)
+ if ((Current.is(TT_BinaryOperator) && Current.isNot(tok::lessless)) ||
+ Current.is(TT_ConditionalExpr))
State.Stack.back().LastOperatorWrapped = Newline;
- if (Current.Type == TT_ArraySubscriptLSquare &&
+ if (Current.is(TT_ArraySubscriptLSquare) &&
State.Stack.back().StartOfArraySubscripts == 0)
State.Stack.back().StartOfArraySubscripts = State.Column;
if ((Current.is(tok::question) && Style.BreakBeforeTernaryOperators) ||
@@ -598,9 +622,9 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
if (Current.isMemberAccess())
State.Stack.back().StartOfFunctionCall =
Current.LastOperator ? 0 : State.Column + Current.ColumnWidth;
- if (Current.Type == TT_SelectorName)
+ if (Current.is(TT_SelectorName))
State.Stack.back().ObjCSelectorNameFound = true;
- if (Current.Type == TT_CtorInitializerColon) {
+ if (Current.is(TT_CtorInitializerColon)) {
// Indent 2 from the column, so:
// SomeClass::SomeClass()
// : First(...), ...
@@ -608,6 +632,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
// ^ line up here.
State.Stack.back().Indent =
State.Column + (Style.BreakConstructorInitializersBeforeComma ? 0 : 2);
+ State.Stack.back().NestedBlockIndent = State.Stack.back().Indent;
if (Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
State.Stack.back().AvoidBinPacking = true;
State.Stack.back().BreakBeforeParameter = false;
@@ -616,7 +641,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
// In ObjC method declaration we align on the ":" of parameters, but we need
// to ensure that we indent parameters on subsequent lines by at least our
// continuation indent width.
- if (Current.Type == TT_ObjCMethodSpecifier)
+ if (Current.is(TT_ObjCMethodSpecifier))
State.Stack.back().Indent += Style.ContinuationIndentWidth;
// Insert scopes created by fake parenthesis.
@@ -628,18 +653,21 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
// foo();
// bar();
// }, a, b, c);
- if (Style.Language == FormatStyle::LK_JavaScript) {
- if (Current.isNot(tok::comment) && Previous && Previous->is(tok::l_brace) &&
- State.Stack.size() > 1) {
- if (State.Stack[State.Stack.size() - 2].JSFunctionInlined && Newline) {
- for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i) {
- State.Stack[i].NoLineBreak = true;
- }
+ if (Current.isNot(tok::comment) && Previous && Previous->is(tok::l_brace) &&
+ State.Stack.size() > 1) {
+ if (State.Stack[State.Stack.size() - 2].NestedBlockInlined && Newline) {
+ for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i) {
+ State.Stack[i].NoLineBreak = true;
}
- State.Stack[State.Stack.size() - 2].JSFunctionInlined = false;
}
- if (Current.TokenText == "function")
- State.Stack.back().JSFunctionInlined = !Newline;
+ State.Stack[State.Stack.size() - 2].NestedBlockInlined = false;
+ }
+ if (Previous && (Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) ||
+ Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr)) &&
+ !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
+ State.Stack.back().NestedBlockInlined =
+ !Newline &&
+ (Previous->isNot(tok::l_paren) || Previous->ParameterCount > 1);
}
moveStatePastFakeLParens(State, Newline);
@@ -685,8 +713,9 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// is special cased.
bool SkipFirstExtraIndent =
(Previous && (Previous->opensScope() || Previous->is(tok::kw_return) ||
- Previous->getPrecedence() == prec::Assignment ||
- Previous->Type == TT_ObjCMethodExpr));
+ (Previous->getPrecedence() == prec::Assignment &&
+ Style.AlignOperands) ||
+ Previous->is(TT_ObjCMethodExpr)));
for (SmallVectorImpl<prec::Level>::const_reverse_iterator
I = Current.FakeLParens.rbegin(),
E = Current.FakeLParens.rend();
@@ -694,10 +723,15 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
ParenState NewParenState = State.Stack.back();
NewParenState.ContainsLineBreak = false;
- // Indent from 'LastSpace' unless this the fake parentheses encapsulating a
- // builder type call after 'return'. If such a call is line-wrapped, we
- // commonly just want to indent from the start of the line.
- if (!Previous || Previous->isNot(tok::kw_return) || *I > 0)
+ // Indent from 'LastSpace' unless these are fake parentheses encapsulating
+ // a builder type call after 'return' or if the alignment after opening
+ // brackets is disabled.
+ if (!Current.isTrailingComment() &&
+ (Style.AlignOperands || *I < prec::Assignment) &&
+ (!Previous || Previous->isNot(tok::kw_return) ||
+ (Style.Language != FormatStyle::LK_Java && *I > 0)) &&
+ (Style.AlignAfterOpenBracket || *I != prec::Comma ||
+ Current.NestingLevel == 0))
NewParenState.Indent =
std::max(std::max(State.Column, NewParenState.Indent),
State.Stack.back().LastSpace);
@@ -707,14 +741,14 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// Exclude relational operators, as there, it is always more desirable to
// have the LHS 'left' of the RHS.
if (Previous && Previous->getPrecedence() > prec::Assignment &&
- (Previous->Type == TT_BinaryOperator ||
- Previous->Type == TT_ConditionalExpr) &&
+ Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr) &&
Previous->getPrecedence() != prec::Relational) {
- bool BreakBeforeOperator = Previous->is(tok::lessless) ||
- (Previous->Type == TT_BinaryOperator &&
- Style.BreakBeforeBinaryOperators) ||
- (Previous->Type == TT_ConditionalExpr &&
- Style.BreakBeforeTernaryOperators);
+ bool BreakBeforeOperator =
+ Previous->is(tok::lessless) ||
+ (Previous->is(TT_BinaryOperator) &&
+ Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None) ||
+ (Previous->is(TT_ConditionalExpr) &&
+ Style.BreakBeforeTernaryOperators);
if ((!Newline && !BreakBeforeOperator) ||
(!State.Stack.back().LastOperatorWrapped && BreakBeforeOperator))
NewParenState.NoLineBreak = true;
@@ -728,7 +762,8 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// ParameterToInnerFunction));
if (*I > prec::Unknown)
NewParenState.LastSpace = std::max(NewParenState.LastSpace, State.Column);
- NewParenState.StartOfFunctionCall = State.Column;
+ if (*I != prec::Conditional)
+ NewParenState.StartOfFunctionCall = State.Column;
// Always indent conditional expressions. Never indent expression where
// the 'operator' is ',', ';' or an assignment (i.e. *I <=
@@ -736,7 +771,7 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// other expression, unless the indentation needs to be skipped.
if (*I == prec::Conditional ||
(!SkipFirstExtraIndent && *I > prec::Assignment &&
- !Style.BreakBeforeBinaryOperators))
+ !Current.isTrailingComment()))
NewParenState.Indent += Style.ContinuationIndentWidth;
if ((Previous && !Previous->opensScope()) || *I > prec::Comma)
NewParenState.BreakBeforeParameter = false;
@@ -745,9 +780,8 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
}
}
-// Remove the fake r_parens after 'Tok'.
-static void consumeRParens(LineState& State, const FormatToken &Tok) {
- for (unsigned i = 0, e = Tok.FakeRParens; i != e; ++i) {
+void ContinuationIndenter::moveStatePastFakeRParens(LineState &State) {
+ for (unsigned i = 0, e = State.NextToken->FakeRParens; i != e; ++i) {
unsigned VariablePos = State.Stack.back().VariablePos;
assert(State.Stack.size() > 1);
if (State.Stack.size() == 1) {
@@ -759,46 +793,6 @@ static void consumeRParens(LineState& State, const FormatToken &Tok) {
}
}
-// Returns whether 'Tok' opens or closes a scope requiring special handling
-// of the subsequent fake r_parens.
-//
-// For example, if this is an l_brace starting a nested block, we pretend (wrt.
-// to indentation) that we already consumed the corresponding r_brace. Thus, we
-// remove all ParenStates caused by fake parentheses that end at the r_brace.
-// The net effect of this is that we don't indent relative to the l_brace, if
-// the nested block is the last parameter of a function. This formats:
-//
-// SomeFunction(a, [] {
-// f(); // break
-// });
-//
-// instead of:
-// SomeFunction(a, [] {
-// f(); // break
-// });
-static bool fakeRParenSpecialCase(const LineState &State) {
- const FormatToken &Tok = *State.NextToken;
- if (!Tok.MatchingParen)
- return false;
- const FormatToken *Left = &Tok;
- if (Tok.isOneOf(tok::r_brace, tok::r_square))
- Left = Tok.MatchingParen;
- return !State.Stack.back().HasMultipleNestedBlocks &&
- Left->isOneOf(tok::l_brace, tok::l_square) &&
- (Left->BlockKind == BK_Block ||
- Left->Type == TT_ArrayInitializerLSquare ||
- Left->Type == TT_DictLiteral);
-}
-
-void ContinuationIndenter::moveStatePastFakeRParens(LineState &State) {
- // Don't remove FakeRParens attached to r_braces that surround nested blocks
- // as they will have been removed early (see above).
- if (fakeRParenSpecialCase(State))
- return;
-
- consumeRParens(State, *State.NextToken);
-}
-
void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
bool Newline) {
const FormatToken &Current = *State.NextToken;
@@ -814,48 +808,46 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
unsigned NewIndentLevel = State.Stack.back().IndentLevel;
bool AvoidBinPacking;
bool BreakBeforeParameter = false;
- if (Current.is(tok::l_brace) || Current.Type == TT_ArrayInitializerLSquare) {
- if (fakeRParenSpecialCase(State))
- consumeRParens(State, *Current.MatchingParen);
-
- NewIndent = State.Stack.back().LastSpace;
+ if (Current.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)) {
if (Current.opensBlockTypeList(Style)) {
- NewIndent += Style.IndentWidth;
+ NewIndent = State.Stack.back().NestedBlockIndent + Style.IndentWidth;
NewIndent = std::min(State.Column + 2, NewIndent);
++NewIndentLevel;
} else {
- NewIndent += Style.ContinuationIndentWidth;
+ NewIndent = State.Stack.back().LastSpace + Style.ContinuationIndentWidth;
NewIndent = std::min(State.Column + 1, NewIndent);
}
const FormatToken *NextNoComment = Current.getNextNonComment();
- AvoidBinPacking = Current.Type == TT_ArrayInitializerLSquare ||
- Current.Type == TT_DictLiteral ||
- Style.Language == FormatStyle::LK_Proto ||
- !Style.BinPackParameters ||
- (NextNoComment &&
- NextNoComment->Type == TT_DesignatedInitializerPeriod);
+ AvoidBinPacking =
+ Current.isOneOf(TT_ArrayInitializerLSquare, TT_DictLiteral) ||
+ Style.Language == FormatStyle::LK_Proto || !Style.BinPackParameters ||
+ (NextNoComment && NextNoComment->is(TT_DesignatedInitializerPeriod));
} else {
NewIndent = Style.ContinuationIndentWidth +
std::max(State.Stack.back().LastSpace,
State.Stack.back().StartOfFunctionCall);
- AvoidBinPacking = !Style.BinPackParameters ||
- (Style.ExperimentalAutoDetectBinPacking &&
- (Current.PackingKind == PPK_OnePerLine ||
- (!BinPackInconclusiveFunctions &&
- Current.PackingKind == PPK_Inconclusive)));
+ AvoidBinPacking =
+ (State.Line->MustBeDeclaration && !Style.BinPackParameters) ||
+ (!State.Line->MustBeDeclaration && !Style.BinPackArguments) ||
+ (Style.ExperimentalAutoDetectBinPacking &&
+ (Current.PackingKind == PPK_OnePerLine ||
+ (!BinPackInconclusiveFunctions &&
+ Current.PackingKind == PPK_Inconclusive)));
// If this '[' opens an ObjC call, determine whether all parameters fit
// into one line and put one per line if they don't.
- if (Current.Type == TT_ObjCMethodExpr && Style.ColumnLimit != 0 &&
+ if (Current.is(TT_ObjCMethodExpr) && Style.ColumnLimit != 0 &&
getLengthToMatchingParen(Current) + State.Column >
getColumnLimit(State))
BreakBeforeParameter = true;
}
bool NoLineBreak = State.Stack.back().NoLineBreak ||
- (Current.Type == TT_TemplateOpener &&
+ (Current.is(TT_TemplateOpener) &&
State.Stack.back().ContainsUnwrappedBuilder);
+ unsigned NestedBlockIndent = State.Stack.back().NestedBlockIndent;
State.Stack.push_back(ParenState(NewIndent, NewIndentLevel,
State.Stack.back().LastSpace,
AvoidBinPacking, NoLineBreak));
+ State.Stack.back().NestedBlockIndent = NestedBlockIndent;
State.Stack.back().BreakBeforeParameter = BreakBeforeParameter;
State.Stack.back().HasMultipleNestedBlocks = Current.BlockParameterCount > 1;
}
@@ -870,7 +862,7 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
if (State.Stack.size() > 1 &&
(Current.isOneOf(tok::r_paren, tok::r_square) ||
(Current.is(tok::r_brace) && State.NextToken != State.Line->First) ||
- State.NextToken->Type == TT_TemplateCloser))
+ State.NextToken->is(TT_TemplateCloser)))
State.Stack.pop_back();
if (Current.is(tok::r_square)) {
@@ -882,20 +874,17 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
}
void ContinuationIndenter::moveStateToNewBlock(LineState &State) {
- // If we have already found more than one lambda introducers on this level, we
- // opt out of this because similarity between the lambdas is more important.
- if (fakeRParenSpecialCase(State))
- consumeRParens(State, *State.NextToken->MatchingParen);
-
- // For some reason, ObjC blocks are indented like continuations.
- unsigned NewIndent = State.Stack.back().LastSpace +
- (State.NextToken->Type == TT_ObjCBlockLBrace
- ? Style.ContinuationIndentWidth
- : Style.IndentWidth);
+ unsigned NestedBlockIndent = State.Stack.back().NestedBlockIndent;
+ // ObjC blocks sometimes follow special indentation rules.
+ unsigned NewIndent =
+ NestedBlockIndent + (State.NextToken->is(TT_ObjCBlockLBrace)
+ ? Style.ObjCBlockIndentWidth
+ : Style.IndentWidth);
State.Stack.push_back(ParenState(
NewIndent, /*NewIndentLevel=*/State.Stack.back().IndentLevel + 1,
State.Stack.back().LastSpace, /*AvoidBinPacking=*/true,
State.Stack.back().NoLineBreak));
+ State.Stack.back().NestedBlockIndent = NestedBlockIndent;
State.Stack.back().BreakBeforeParameter = true;
}
@@ -915,34 +904,17 @@ unsigned ContinuationIndenter::addMultilineToken(const FormatToken &Current,
return 0;
}
-static bool getRawStringLiteralPrefixPostfix(StringRef Text, StringRef &Prefix,
- StringRef &Postfix) {
- if (Text.startswith(Prefix = "R\"") || Text.startswith(Prefix = "uR\"") ||
- Text.startswith(Prefix = "UR\"") || Text.startswith(Prefix = "u8R\"") ||
- Text.startswith(Prefix = "LR\"")) {
- size_t ParenPos = Text.find('(');
- if (ParenPos != StringRef::npos) {
- StringRef Delimiter =
- Text.substr(Prefix.size(), ParenPos - Prefix.size());
- Prefix = Text.substr(0, ParenPos + 1);
- Postfix = Text.substr(Text.size() - 2 - Delimiter.size());
- return Postfix.front() == ')' && Postfix.back() == '"' &&
- Postfix.substr(1).startswith(Delimiter);
- }
- }
- return false;
-}
-
unsigned ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
LineState &State,
bool DryRun) {
// Don't break multi-line tokens other than block comments. Instead, just
// update the state.
- if (Current.Type != TT_BlockComment && Current.IsMultiline)
+ if (Current.isNot(TT_BlockComment) && Current.IsMultiline)
return addMultilineToken(Current, State);
- // Don't break implicit string literals.
- if (Current.Type == TT_ImplicitStringLiteral)
+ // Don't break implicit string literals or import statements.
+ if (Current.is(TT_ImplicitStringLiteral) ||
+ State.Line->Type == LT_ImportStatement)
return 0;
if (!Current.isStringLiteral() && !Current.is(tok::comment))
@@ -953,6 +925,12 @@ unsigned ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
unsigned ColumnLimit = getColumnLimit(State);
if (Current.isStringLiteral()) {
+ // FIXME: String literal breaking is currently disabled for Java and JS, as
+ // it requires strings to be merged using "+" which we don't support.
+ if (Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript)
+ return 0;
+
// Don't break string literals inside preprocessor directives (except for
// #define directives, as their contents are stored in separate lines and
// are not affected by this check).
@@ -983,23 +961,22 @@ unsigned ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
Text.startswith(Prefix = "u\"") || Text.startswith(Prefix = "U\"") ||
Text.startswith(Prefix = "u8\"") ||
Text.startswith(Prefix = "L\""))) ||
- (Text.startswith(Prefix = "_T(\"") && Text.endswith(Postfix = "\")")) ||
- getRawStringLiteralPrefixPostfix(Text, Prefix, Postfix)) {
+ (Text.startswith(Prefix = "_T(\"") && Text.endswith(Postfix = "\")"))) {
Token.reset(new BreakableStringLiteral(
Current, State.Line->Level, StartColumn, Prefix, Postfix,
State.Line->InPPDirective, Encoding, Style));
} else {
return 0;
}
- } else if (Current.Type == TT_BlockComment && Current.isTrailingComment()) {
+ } else if (Current.is(TT_BlockComment) && Current.isTrailingComment()) {
if (CommentPragmasRegex.match(Current.TokenText.substr(2)))
return 0;
Token.reset(new BreakableBlockComment(
Current, State.Line->Level, StartColumn, Current.OriginalColumn,
!Current.Previous, State.Line->InPPDirective, Encoding, Style));
- } else if (Current.Type == TT_LineComment &&
+ } else if (Current.is(TT_LineComment) &&
(Current.Previous == nullptr ||
- Current.Previous->Type != TT_ImplicitStringLiteral)) {
+ Current.Previous->isNot(TT_ImplicitStringLiteral))) {
if (CommentPragmasRegex.match(Current.TokenText.substr(2)))
return 0;
Token.reset(new BreakableLineComment(Current, State.Line->Level,
@@ -1073,7 +1050,7 @@ unsigned ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// If we break the token inside a parameter list, we need to break before
// the next parameter on all levels, so that the next parameter is clearly
// visible. Line comments already introduce a break.
- if (Current.Type != TT_LineComment) {
+ if (Current.isNot(TT_LineComment)) {
for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
State.Stack[i].BreakBeforeParameter = true;
}
@@ -1093,7 +1070,7 @@ unsigned ContinuationIndenter::getColumnLimit(const LineState &State) const {
bool ContinuationIndenter::nextIsMultilineString(const LineState &State) {
const FormatToken &Current = *State.NextToken;
- if (!Current.isStringLiteral() || Current.Type == TT_ImplicitStringLiteral)
+ if (!Current.isStringLiteral() || Current.is(TT_ImplicitStringLiteral))
return false;
// We never consider raw string literals "multiline" for the purpose of
// AlwaysBreakBeforeMultilineStrings implementation as they are special-cased
diff --git a/lib/Format/ContinuationIndenter.h b/lib/Format/ContinuationIndenter.h
index 0969a8cec983..36691d945b4f 100644
--- a/lib/Format/ContinuationIndenter.h
+++ b/lib/Format/ContinuationIndenter.h
@@ -13,10 +13,11 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FORMAT_CONTINUATION_INDENTER_H
-#define LLVM_CLANG_FORMAT_CONTINUATION_INDENTER_H
+#ifndef LLVM_CLANG_LIB_FORMAT_CONTINUATIONINDENTER_H
+#define LLVM_CLANG_LIB_FORMAT_CONTINUATIONINDENTER_H
#include "Encoding.h"
+#include "FormatToken.h"
#include "clang/Format/Format.h"
#include "llvm/Support/Regex.h"
@@ -35,8 +36,9 @@ class ContinuationIndenter {
public:
/// \brief Constructs a \c ContinuationIndenter to format \p Line starting in
/// column \p FirstIndent.
- ContinuationIndenter(const FormatStyle &Style, SourceManager &SourceMgr,
- WhitespaceManager &Whitespaces,
+ ContinuationIndenter(const FormatStyle &Style,
+ const AdditionalKeywords &Keywords,
+ SourceManager &SourceMgr, WhitespaceManager &Whitespaces,
encoding::Encoding Encoding,
bool BinPackInconclusiveFunctions);
@@ -134,6 +136,7 @@ private:
bool nextIsMultilineString(const LineState &State);
FormatStyle Style;
+ const AdditionalKeywords &Keywords;
SourceManager &SourceMgr;
WhitespaceManager &Whitespaces;
encoding::Encoding Encoding;
@@ -145,14 +148,15 @@ struct ParenState {
ParenState(unsigned Indent, unsigned IndentLevel, unsigned LastSpace,
bool AvoidBinPacking, bool NoLineBreak)
: Indent(Indent), IndentLevel(IndentLevel), LastSpace(LastSpace),
- FirstLessLess(0), BreakBeforeClosingBrace(false), QuestionColumn(0),
+ NestedBlockIndent(Indent), FirstLessLess(0),
+ BreakBeforeClosingBrace(false), QuestionColumn(0),
AvoidBinPacking(AvoidBinPacking), BreakBeforeParameter(false),
NoLineBreak(NoLineBreak), LastOperatorWrapped(true), ColonPos(0),
StartOfFunctionCall(0), StartOfArraySubscripts(0),
NestedNameSpecifierContinuation(0), CallContinuation(0), VariablePos(0),
ContainsLineBreak(false), ContainsUnwrappedBuilder(0),
AlignColons(true), ObjCSelectorNameFound(false),
- HasMultipleNestedBlocks(false), JSFunctionInlined(false) {}
+ HasMultipleNestedBlocks(false), NestedBlockInlined(false) {}
/// \brief The position to which a specific parenthesis level needs to be
/// indented.
@@ -168,6 +172,10 @@ struct ParenState {
/// OtherParameter));
unsigned LastSpace;
+ /// \brief If a block relative to this parenthesis level gets wrapped, indent
+ /// it this much.
+ unsigned NestedBlockIndent;
+
/// \brief The position the first "<<" operator encountered on each level.
///
/// Used to align "<<" operators. 0 if no such operator has been encountered
@@ -253,15 +261,17 @@ struct ParenState {
/// the same token.
bool HasMultipleNestedBlocks;
- // \brief The previous JavaScript 'function' keyword is not wrapped to a new
- // line.
- bool JSFunctionInlined;
+ // \brief The start of a nested block (e.g. lambda introducer in C++ or
+ // "function" in JavaScript) is not wrapped to a new line.
+ bool NestedBlockInlined;
bool operator<(const ParenState &Other) const {
if (Indent != Other.Indent)
return Indent < Other.Indent;
if (LastSpace != Other.LastSpace)
return LastSpace < Other.LastSpace;
+ if (NestedBlockIndent != Other.NestedBlockIndent)
+ return NestedBlockIndent < Other.NestedBlockIndent;
if (FirstLessLess != Other.FirstLessLess)
return FirstLessLess < Other.FirstLessLess;
if (BreakBeforeClosingBrace != Other.BreakBeforeClosingBrace)
@@ -290,8 +300,8 @@ struct ParenState {
return ContainsLineBreak < Other.ContainsLineBreak;
if (ContainsUnwrappedBuilder != Other.ContainsUnwrappedBuilder)
return ContainsUnwrappedBuilder < Other.ContainsUnwrappedBuilder;
- if (JSFunctionInlined != Other.JSFunctionInlined)
- return JSFunctionInlined < Other.JSFunctionInlined;
+ if (NestedBlockInlined != Other.NestedBlockInlined)
+ return NestedBlockInlined < Other.NestedBlockInlined;
return false;
}
};
@@ -370,4 +380,4 @@ struct LineState {
} // end namespace format
} // end namespace clang
-#endif // LLVM_CLANG_FORMAT_CONTINUATION_INDENTER_H
+#endif
diff --git a/lib/Format/Encoding.h b/lib/Format/Encoding.h
index dba5174b97b4..766d29274ce6 100644
--- a/lib/Format/Encoding.h
+++ b/lib/Format/Encoding.h
@@ -13,8 +13,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FORMAT_ENCODING_H
-#define LLVM_CLANG_FORMAT_ENCODING_H
+#ifndef LLVM_CLANG_LIB_FORMAT_ENCODING_H
+#define LLVM_CLANG_LIB_FORMAT_ENCODING_H
#include "clang/Basic/LLVM.h"
#include "llvm/Support/ConvertUTF.h"
@@ -143,4 +143,4 @@ inline unsigned getEscapeSequenceLength(StringRef Text) {
} // namespace format
} // namespace clang
-#endif // LLVM_CLANG_FORMAT_ENCODING_H
+#endif
diff --git a/lib/Format/Format.cpp b/lib/Format/Format.cpp
index 58dd5604e427..2a4721f2b3b7 100644
--- a/lib/Format/Format.cpp
+++ b/lib/Format/Format.cpp
@@ -15,6 +15,7 @@
#include "ContinuationIndenter.h"
#include "TokenAnnotator.h"
+#include "UnwrappedLineFormatter.h"
#include "UnwrappedLineParser.h"
#include "WhitespaceManager.h"
#include "clang/Basic/Diagnostic.h"
@@ -41,6 +42,7 @@ namespace yaml {
template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
static void enumeration(IO &IO, FormatStyle::LanguageKind &Value) {
IO.enumCase(Value, "Cpp", FormatStyle::LK_Cpp);
+ IO.enumCase(Value, "Java", FormatStyle::LK_Java);
IO.enumCase(Value, "JavaScript", FormatStyle::LK_JavaScript);
IO.enumCase(Value, "Proto", FormatStyle::LK_Proto);
}
@@ -73,6 +75,17 @@ template <> struct ScalarEnumerationTraits<FormatStyle::ShortFunctionStyle> {
IO.enumCase(Value, "All", FormatStyle::SFS_All);
IO.enumCase(Value, "true", FormatStyle::SFS_All);
IO.enumCase(Value, "Inline", FormatStyle::SFS_Inline);
+ IO.enumCase(Value, "Empty", FormatStyle::SFS_Empty);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::BinaryOperatorStyle> {
+ static void enumeration(IO &IO, FormatStyle::BinaryOperatorStyle &Value) {
+ IO.enumCase(Value, "All", FormatStyle::BOS_All);
+ IO.enumCase(Value, "true", FormatStyle::BOS_All);
+ IO.enumCase(Value, "None", FormatStyle::BOS_None);
+ IO.enumCase(Value, "false", FormatStyle::BOS_None);
+ IO.enumCase(Value, "NonAssignment", FormatStyle::BOS_NonAssignment);
}
};
@@ -159,20 +172,24 @@ template <> struct MappingTraits<FormatStyle> {
}
IO.mapOptional("AccessModifierOffset", Style.AccessModifierOffset);
- IO.mapOptional("ConstructorInitializerIndentWidth",
- Style.ConstructorInitializerIndentWidth);
+ IO.mapOptional("AlignAfterOpenBracket", Style.AlignAfterOpenBracket);
IO.mapOptional("AlignEscapedNewlinesLeft", Style.AlignEscapedNewlinesLeft);
+ IO.mapOptional("AlignOperands", Style.AlignOperands);
IO.mapOptional("AlignTrailingComments", Style.AlignTrailingComments);
IO.mapOptional("AllowAllParametersOfDeclarationOnNextLine",
Style.AllowAllParametersOfDeclarationOnNextLine);
IO.mapOptional("AllowShortBlocksOnASingleLine",
Style.AllowShortBlocksOnASingleLine);
+ IO.mapOptional("AllowShortCaseLabelsOnASingleLine",
+ Style.AllowShortCaseLabelsOnASingleLine);
IO.mapOptional("AllowShortIfStatementsOnASingleLine",
Style.AllowShortIfStatementsOnASingleLine);
IO.mapOptional("AllowShortLoopsOnASingleLine",
Style.AllowShortLoopsOnASingleLine);
IO.mapOptional("AllowShortFunctionsOnASingleLine",
Style.AllowShortFunctionsOnASingleLine);
+ IO.mapOptional("AlwaysBreakAfterDefinitionReturnType",
+ Style.AlwaysBreakAfterDefinitionReturnType);
IO.mapOptional("AlwaysBreakTemplateDeclarations",
Style.AlwaysBreakTemplateDeclarations);
IO.mapOptional("AlwaysBreakBeforeMultilineStrings",
@@ -184,9 +201,12 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("BreakConstructorInitializersBeforeComma",
Style.BreakConstructorInitializersBeforeComma);
IO.mapOptional("BinPackParameters", Style.BinPackParameters);
+ IO.mapOptional("BinPackArguments", Style.BinPackArguments);
IO.mapOptional("ColumnLimit", Style.ColumnLimit);
IO.mapOptional("ConstructorInitializerAllOnOneLineOrOnePerLine",
Style.ConstructorInitializerAllOnOneLineOrOnePerLine);
+ IO.mapOptional("ConstructorInitializerIndentWidth",
+ Style.ConstructorInitializerIndentWidth);
IO.mapOptional("DerivePointerAlignment", Style.DerivePointerAlignment);
IO.mapOptional("ExperimentalAutoDetectBinPacking",
Style.ExperimentalAutoDetectBinPacking);
@@ -199,6 +219,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("KeepEmptyLinesAtTheStartOfBlocks",
Style.KeepEmptyLinesAtTheStartOfBlocks);
IO.mapOptional("NamespaceIndentation", Style.NamespaceIndentation);
+ IO.mapOptional("ObjCBlockIndentWidth", Style.ObjCBlockIndentWidth);
IO.mapOptional("ObjCSpaceAfterProperty", Style.ObjCSpaceAfterProperty);
IO.mapOptional("ObjCSpaceBeforeProtocolList",
Style.ObjCSpaceBeforeProtocolList);
@@ -221,10 +242,12 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("UseTab", Style.UseTab);
IO.mapOptional("BreakBeforeBraces", Style.BreakBeforeBraces);
IO.mapOptional("SpacesInParentheses", Style.SpacesInParentheses);
+ IO.mapOptional("SpacesInSquareBrackets", Style.SpacesInSquareBrackets);
IO.mapOptional("SpacesInAngles", Style.SpacesInAngles);
IO.mapOptional("SpaceInEmptyParentheses", Style.SpaceInEmptyParentheses);
IO.mapOptional("SpacesInCStyleCastParentheses",
Style.SpacesInCStyleCastParentheses);
+ IO.mapOptional("SpaceAfterCStyleCast", Style.SpaceAfterCStyleCast);
IO.mapOptional("SpacesInContainerLiterals",
Style.SpacesInContainerLiterals);
IO.mapOptional("SpaceBeforeAssignmentOperators",
@@ -305,16 +328,21 @@ FormatStyle getLLVMStyle() {
LLVMStyle.Language = FormatStyle::LK_Cpp;
LLVMStyle.AccessModifierOffset = -2;
LLVMStyle.AlignEscapedNewlinesLeft = false;
+ LLVMStyle.AlignAfterOpenBracket = true;
+ LLVMStyle.AlignOperands = true;
LLVMStyle.AlignTrailingComments = true;
LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
LLVMStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All;
LLVMStyle.AllowShortBlocksOnASingleLine = false;
+ LLVMStyle.AllowShortCaseLabelsOnASingleLine = false;
LLVMStyle.AllowShortIfStatementsOnASingleLine = false;
LLVMStyle.AllowShortLoopsOnASingleLine = false;
+ LLVMStyle.AlwaysBreakAfterDefinitionReturnType = false;
LLVMStyle.AlwaysBreakBeforeMultilineStrings = false;
LLVMStyle.AlwaysBreakTemplateDeclarations = false;
LLVMStyle.BinPackParameters = true;
- LLVMStyle.BreakBeforeBinaryOperators = false;
+ LLVMStyle.BinPackArguments = true;
+ LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
LLVMStyle.BreakBeforeTernaryOperators = true;
LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
LLVMStyle.BreakConstructorInitializersBeforeComma = false;
@@ -336,6 +364,7 @@ FormatStyle getLLVMStyle() {
LLVMStyle.MaxEmptyLinesToKeep = 1;
LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
LLVMStyle.NamespaceIndentation = FormatStyle::NI_None;
+ LLVMStyle.ObjCBlockIndentWidth = 2;
LLVMStyle.ObjCSpaceAfterProperty = false;
LLVMStyle.ObjCSpaceBeforeProtocolList = true;
LLVMStyle.PointerAlignment = FormatStyle::PAS_Right;
@@ -343,9 +372,11 @@ FormatStyle getLLVMStyle() {
LLVMStyle.Standard = FormatStyle::LS_Cpp11;
LLVMStyle.UseTab = FormatStyle::UT_Never;
LLVMStyle.SpacesInParentheses = false;
+ LLVMStyle.SpacesInSquareBrackets = false;
LLVMStyle.SpaceInEmptyParentheses = false;
LLVMStyle.SpacesInContainerLiterals = true;
LLVMStyle.SpacesInCStyleCastParentheses = false;
+ LLVMStyle.SpaceAfterCStyleCast = false;
LLVMStyle.SpaceBeforeParens = FormatStyle::SBPO_ControlStatements;
LLVMStyle.SpaceBeforeAssignmentOperators = true;
LLVMStyle.SpacesInAngles = false;
@@ -385,10 +416,23 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.PenaltyReturnTypeOnItsOwnLine = 200;
GoogleStyle.PenaltyBreakBeforeFirstCallParameter = 1;
- if (Language == FormatStyle::LK_JavaScript) {
+ if (Language == FormatStyle::LK_Java) {
+ GoogleStyle.AlignAfterOpenBracket = false;
+ GoogleStyle.AlignOperands = false;
+ GoogleStyle.AlignTrailingComments = false;
+ GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
+ GoogleStyle.AllowShortIfStatementsOnASingleLine = false;
+ GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
+ GoogleStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_NonAssignment;
+ GoogleStyle.ColumnLimit = 100;
+ GoogleStyle.SpaceAfterCStyleCast = true;
+ GoogleStyle.SpacesBeforeTrailingComments = 1;
+ } else if (Language == FormatStyle::LK_JavaScript) {
GoogleStyle.BreakBeforeTernaryOperators = false;
GoogleStyle.MaxEmptyLinesToKeep = 3;
GoogleStyle.SpacesInContainerLiterals = false;
+ GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
+ GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
} else if (Language == FormatStyle::LK_Proto) {
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
GoogleStyle.SpacesInContainerLiterals = false;
@@ -399,13 +443,18 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
FormatStyle ChromiumStyle = getGoogleStyle(Language);
- ChromiumStyle.AllowAllParametersOfDeclarationOnNextLine = false;
- ChromiumStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
- ChromiumStyle.AllowShortIfStatementsOnASingleLine = false;
- ChromiumStyle.AllowShortLoopsOnASingleLine = false;
- ChromiumStyle.BinPackParameters = false;
- ChromiumStyle.DerivePointerAlignment = false;
- ChromiumStyle.Standard = FormatStyle::LS_Cpp03;
+ if (Language == FormatStyle::LK_Java) {
+ ChromiumStyle.AllowShortIfStatementsOnASingleLine = true;
+ ChromiumStyle.IndentWidth = 4;
+ ChromiumStyle.ContinuationIndentWidth = 8;
+ } else {
+ ChromiumStyle.AllowAllParametersOfDeclarationOnNextLine = false;
+ ChromiumStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
+ ChromiumStyle.AllowShortIfStatementsOnASingleLine = false;
+ ChromiumStyle.AllowShortLoopsOnASingleLine = false;
+ ChromiumStyle.BinPackParameters = false;
+ ChromiumStyle.DerivePointerAlignment = false;
+ }
return ChromiumStyle;
}
@@ -427,14 +476,17 @@ FormatStyle getMozillaStyle() {
FormatStyle getWebKitStyle() {
FormatStyle Style = getLLVMStyle();
Style.AccessModifierOffset = -4;
+ Style.AlignAfterOpenBracket = false;
+ Style.AlignOperands = false;
Style.AlignTrailingComments = false;
- Style.BreakBeforeBinaryOperators = true;
+ Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
Style.BreakBeforeBraces = FormatStyle::BS_Stroustrup;
Style.BreakConstructorInitializersBeforeComma = true;
Style.Cpp11BracedListStyle = false;
Style.ColumnLimit = 0;
Style.IndentWidth = 4;
Style.NamespaceIndentation = FormatStyle::NI_Inner;
+ Style.ObjCBlockIndentWidth = 4;
Style.ObjCSpaceAfterProperty = true;
Style.PointerAlignment = FormatStyle::PAS_Left;
Style.Standard = FormatStyle::LS_Cpp03;
@@ -443,7 +495,8 @@ FormatStyle getWebKitStyle() {
FormatStyle getGNUStyle() {
FormatStyle Style = getLLVMStyle();
- Style.BreakBeforeBinaryOperators = true;
+ Style.AlwaysBreakAfterDefinitionReturnType = true;
+ Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
Style.BreakBeforeBraces = FormatStyle::BS_GNU;
Style.BreakBeforeTernaryOperators = true;
Style.Cpp11BracedListStyle = false;
@@ -542,736 +595,18 @@ std::string configurationAsText(const FormatStyle &Style) {
namespace {
-class NoColumnLimitFormatter {
-public:
- NoColumnLimitFormatter(ContinuationIndenter *Indenter) : Indenter(Indenter) {}
-
- /// \brief Formats the line starting at \p State, simply keeping all of the
- /// input's line breaking decisions.
- void format(unsigned FirstIndent, const AnnotatedLine *Line) {
- LineState State =
- Indenter->getInitialState(FirstIndent, Line, /*DryRun=*/false);
- while (State.NextToken) {
- bool Newline =
- Indenter->mustBreak(State) ||
- (Indenter->canBreak(State) && State.NextToken->NewlinesBefore > 0);
- Indenter->addTokenToState(State, Newline, /*DryRun=*/false);
- }
- }
-
-private:
- ContinuationIndenter *Indenter;
-};
-
-class LineJoiner {
-public:
- LineJoiner(const FormatStyle &Style) : Style(Style) {}
-
- /// \brief Calculates how many lines can be merged into 1 starting at \p I.
- unsigned
- tryFitMultipleLinesInOne(unsigned Indent,
- SmallVectorImpl<AnnotatedLine *>::const_iterator I,
- SmallVectorImpl<AnnotatedLine *>::const_iterator E) {
- // We can never merge stuff if there are trailing line comments.
- const AnnotatedLine *TheLine = *I;
- if (TheLine->Last->Type == TT_LineComment)
- return 0;
-
- if (Style.ColumnLimit > 0 && Indent > Style.ColumnLimit)
- return 0;
-
- unsigned Limit =
- Style.ColumnLimit == 0 ? UINT_MAX : Style.ColumnLimit - Indent;
- // If we already exceed the column limit, we set 'Limit' to 0. The different
- // tryMerge..() functions can then decide whether to still do merging.
- Limit = TheLine->Last->TotalLength > Limit
- ? 0
- : Limit - TheLine->Last->TotalLength;
-
- if (I + 1 == E || I[1]->Type == LT_Invalid || I[1]->First->MustBreakBefore)
- return 0;
-
- // FIXME: TheLine->Level != 0 might or might not be the right check to do.
- // If necessary, change to something smarter.
- bool MergeShortFunctions =
- Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_All ||
- (Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_Inline &&
- TheLine->Level != 0);
-
- if (TheLine->Last->Type == TT_FunctionLBrace &&
- TheLine->First != TheLine->Last) {
- return MergeShortFunctions ? tryMergeSimpleBlock(I, E, Limit) : 0;
- }
- if (TheLine->Last->is(tok::l_brace)) {
- return Style.BreakBeforeBraces == FormatStyle::BS_Attach
- ? tryMergeSimpleBlock(I, E, Limit)
- : 0;
- }
- if (I[1]->First->Type == TT_FunctionLBrace &&
- Style.BreakBeforeBraces != FormatStyle::BS_Attach) {
- // Check for Limit <= 2 to account for the " {".
- if (Limit <= 2 || (Style.ColumnLimit == 0 && containsMustBreak(TheLine)))
- return 0;
- Limit -= 2;
-
- unsigned MergedLines = 0;
- if (MergeShortFunctions) {
- MergedLines = tryMergeSimpleBlock(I + 1, E, Limit);
- // If we managed to merge the block, count the function header, which is
- // on a separate line.
- if (MergedLines > 0)
- ++MergedLines;
- }
- return MergedLines;
- }
- if (TheLine->First->is(tok::kw_if)) {
- return Style.AllowShortIfStatementsOnASingleLine
- ? tryMergeSimpleControlStatement(I, E, Limit)
- : 0;
- }
- if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while)) {
- return Style.AllowShortLoopsOnASingleLine
- ? tryMergeSimpleControlStatement(I, E, Limit)
- : 0;
- }
- if (TheLine->InPPDirective &&
- (TheLine->First->HasUnescapedNewline || TheLine->First->IsFirst)) {
- return tryMergeSimplePPDirective(I, E, Limit);
- }
- return 0;
- }
-
-private:
- unsigned
- tryMergeSimplePPDirective(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
- SmallVectorImpl<AnnotatedLine *>::const_iterator E,
- unsigned Limit) {
- if (Limit == 0)
- return 0;
- if (!I[1]->InPPDirective || I[1]->First->HasUnescapedNewline)
- return 0;
- if (I + 2 != E && I[2]->InPPDirective && !I[2]->First->HasUnescapedNewline)
- return 0;
- if (1 + I[1]->Last->TotalLength > Limit)
- return 0;
- return 1;
- }
-
- unsigned tryMergeSimpleControlStatement(
- SmallVectorImpl<AnnotatedLine *>::const_iterator I,
- SmallVectorImpl<AnnotatedLine *>::const_iterator E, unsigned Limit) {
- if (Limit == 0)
- return 0;
- if ((Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
- Style.BreakBeforeBraces == FormatStyle::BS_GNU) &&
- (I[1]->First->is(tok::l_brace) && !Style.AllowShortBlocksOnASingleLine))
- return 0;
- if (I[1]->InPPDirective != (*I)->InPPDirective ||
- (I[1]->InPPDirective && I[1]->First->HasUnescapedNewline))
- return 0;
- Limit = limitConsideringMacros(I + 1, E, Limit);
- AnnotatedLine &Line = **I;
- if (Line.Last->isNot(tok::r_paren))
- return 0;
- if (1 + I[1]->Last->TotalLength > Limit)
- return 0;
- if (I[1]->First->isOneOf(tok::semi, tok::kw_if, tok::kw_for,
- tok::kw_while) ||
- I[1]->First->Type == TT_LineComment)
- return 0;
- // Only inline simple if's (no nested if or else).
- if (I + 2 != E && Line.First->is(tok::kw_if) &&
- I[2]->First->is(tok::kw_else))
- return 0;
- return 1;
- }
-
- unsigned
- tryMergeSimpleBlock(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
- SmallVectorImpl<AnnotatedLine *>::const_iterator E,
- unsigned Limit) {
- AnnotatedLine &Line = **I;
-
- // Don't merge ObjC @ keywords and methods.
- if (Line.First->isOneOf(tok::at, tok::minus, tok::plus))
- return 0;
-
- // Check that the current line allows merging. This depends on whether we
-    // are in a control flow statement as well as several style flags.
- if (Line.First->isOneOf(tok::kw_else, tok::kw_case))
- return 0;
- if (Line.First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_do, tok::kw_try,
- tok::kw_catch, tok::kw_for, tok::r_brace)) {
- if (!Style.AllowShortBlocksOnASingleLine)
- return 0;
- if (!Style.AllowShortIfStatementsOnASingleLine &&
- Line.First->is(tok::kw_if))
- return 0;
- if (!Style.AllowShortLoopsOnASingleLine &&
- Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for))
- return 0;
- // FIXME: Consider an option to allow short exception handling clauses on
- // a single line.
- if (Line.First->isOneOf(tok::kw_try, tok::kw_catch))
- return 0;
- }
-
- FormatToken *Tok = I[1]->First;
- if (Tok->is(tok::r_brace) && !Tok->MustBreakBefore &&
- (Tok->getNextNonComment() == nullptr ||
- Tok->getNextNonComment()->is(tok::semi))) {
- // We merge empty blocks even if the line exceeds the column limit.
- Tok->SpacesRequiredBefore = 0;
- Tok->CanBreakBefore = true;
- return 1;
- } else if (Limit != 0 && Line.First->isNot(tok::kw_namespace)) {
- // We don't merge short records.
- if (Line.First->isOneOf(tok::kw_class, tok::kw_union, tok::kw_struct))
- return 0;
-
- // Check that we still have three lines and they fit into the limit.
- if (I + 2 == E || I[2]->Type == LT_Invalid)
- return 0;
- Limit = limitConsideringMacros(I + 2, E, Limit);
-
- if (!nextTwoLinesFitInto(I, Limit))
- return 0;
-
- // Second, check that the next line does not contain any braces - if it
- // does, readability declines when putting it into a single line.
- if (I[1]->Last->Type == TT_LineComment)
- return 0;
- do {
- if (Tok->is(tok::l_brace) && Tok->BlockKind != BK_BracedInit)
- return 0;
- Tok = Tok->Next;
- } while (Tok);
-
- // Last, check that the third line starts with a closing brace.
- Tok = I[2]->First;
- if (Tok->isNot(tok::r_brace))
- return 0;
-
- return 2;
- }
- return 0;
- }
-
- /// Returns the modified column limit for \p I if it is inside a macro and
- /// needs a trailing '\'.
- unsigned
- limitConsideringMacros(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
- SmallVectorImpl<AnnotatedLine *>::const_iterator E,
- unsigned Limit) {
- if (I[0]->InPPDirective && I + 1 != E &&
- !I[1]->First->HasUnescapedNewline && !I[1]->First->is(tok::eof)) {
- return Limit < 2 ? 0 : Limit - 2;
- }
- return Limit;
- }
-
- bool nextTwoLinesFitInto(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
- unsigned Limit) {
- if (I[1]->First->MustBreakBefore || I[2]->First->MustBreakBefore)
- return false;
- return 1 + I[1]->Last->TotalLength + 1 + I[2]->Last->TotalLength <= Limit;
- }
-
- bool containsMustBreak(const AnnotatedLine *Line) {
- for (const FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
- if (Tok->MustBreakBefore)
- return true;
- }
- return false;
- }
-
- const FormatStyle &Style;
-};
-
-class UnwrappedLineFormatter {
-public:
- UnwrappedLineFormatter(ContinuationIndenter *Indenter,
- WhitespaceManager *Whitespaces,
- const FormatStyle &Style)
- : Indenter(Indenter), Whitespaces(Whitespaces), Style(Style),
- Joiner(Style) {}
-
- unsigned format(const SmallVectorImpl<AnnotatedLine *> &Lines, bool DryRun,
- int AdditionalIndent = 0, bool FixBadIndentation = false) {
- // Try to look up already computed penalty in DryRun-mode.
- std::pair<const SmallVectorImpl<AnnotatedLine *> *, unsigned> CacheKey(
- &Lines, AdditionalIndent);
- auto CacheIt = PenaltyCache.find(CacheKey);
- if (DryRun && CacheIt != PenaltyCache.end())
- return CacheIt->second;
-
- assert(!Lines.empty());
- unsigned Penalty = 0;
- std::vector<int> IndentForLevel;
- for (unsigned i = 0, e = Lines[0]->Level; i != e; ++i)
- IndentForLevel.push_back(Style.IndentWidth * i + AdditionalIndent);
- const AnnotatedLine *PreviousLine = nullptr;
- for (SmallVectorImpl<AnnotatedLine *>::const_iterator I = Lines.begin(),
- E = Lines.end();
- I != E; ++I) {
- const AnnotatedLine &TheLine = **I;
- const FormatToken *FirstTok = TheLine.First;
- int Offset = getIndentOffset(*FirstTok);
-
- // Determine indent and try to merge multiple unwrapped lines.
- unsigned Indent;
- if (TheLine.InPPDirective) {
- Indent = TheLine.Level * Style.IndentWidth;
- } else {
- while (IndentForLevel.size() <= TheLine.Level)
- IndentForLevel.push_back(-1);
- IndentForLevel.resize(TheLine.Level + 1);
- Indent = getIndent(IndentForLevel, TheLine.Level);
- }
- unsigned LevelIndent = Indent;
- if (static_cast<int>(Indent) + Offset >= 0)
- Indent += Offset;
-
- // Merge multiple lines if possible.
- unsigned MergedLines = Joiner.tryFitMultipleLinesInOne(Indent, I, E);
- if (MergedLines > 0 && Style.ColumnLimit == 0) {
- // Disallow line merging if there is a break at the start of one of the
- // input lines.
- for (unsigned i = 0; i < MergedLines; ++i) {
- if (I[i + 1]->First->NewlinesBefore > 0)
- MergedLines = 0;
- }
- }
- if (!DryRun) {
- for (unsigned i = 0; i < MergedLines; ++i) {
- join(*I[i], *I[i + 1]);
- }
- }
- I += MergedLines;
-
- bool FixIndentation =
- FixBadIndentation && (LevelIndent != FirstTok->OriginalColumn);
- if (TheLine.First->is(tok::eof)) {
- if (PreviousLine && PreviousLine->Affected && !DryRun) {
- // Remove the file's trailing whitespace.
- unsigned Newlines = std::min(FirstTok->NewlinesBefore, 1u);
- Whitespaces->replaceWhitespace(*TheLine.First, Newlines,
- /*IndentLevel=*/0, /*Spaces=*/0,
- /*TargetColumn=*/0);
- }
- } else if (TheLine.Type != LT_Invalid &&
- (TheLine.Affected || FixIndentation)) {
- if (FirstTok->WhitespaceRange.isValid()) {
- if (!DryRun)
- formatFirstToken(*TheLine.First, PreviousLine, TheLine.Level,
- Indent, TheLine.InPPDirective);
- } else {
- Indent = LevelIndent = FirstTok->OriginalColumn;
- }
-
- // If everything fits on a single line, just put it there.
- unsigned ColumnLimit = Style.ColumnLimit;
- if (I + 1 != E) {
- AnnotatedLine *NextLine = I[1];
- if (NextLine->InPPDirective && !NextLine->First->HasUnescapedNewline)
- ColumnLimit = getColumnLimit(TheLine.InPPDirective);
- }
-
- if (TheLine.Last->TotalLength + Indent <= ColumnLimit) {
- LineState State = Indenter->getInitialState(Indent, &TheLine, DryRun);
- while (State.NextToken) {
- formatChildren(State, /*Newline=*/false, /*DryRun=*/false, Penalty);
- Indenter->addTokenToState(State, /*Newline=*/false, DryRun);
- }
- } else if (Style.ColumnLimit == 0) {
- // FIXME: Implement nested blocks for ColumnLimit = 0.
- NoColumnLimitFormatter Formatter(Indenter);
- if (!DryRun)
- Formatter.format(Indent, &TheLine);
- } else {
- Penalty += format(TheLine, Indent, DryRun);
- }
-
- if (!TheLine.InPPDirective)
- IndentForLevel[TheLine.Level] = LevelIndent;
- } else if (TheLine.ChildrenAffected) {
- format(TheLine.Children, DryRun);
- } else {
- // Format the first token if necessary, and notify the WhitespaceManager
- // about the unchanged whitespace.
- for (FormatToken *Tok = TheLine.First; Tok; Tok = Tok->Next) {
- if (Tok == TheLine.First &&
- (Tok->NewlinesBefore > 0 || Tok->IsFirst)) {
- unsigned LevelIndent = Tok->OriginalColumn;
- if (!DryRun) {
- // Remove trailing whitespace of the previous line.
- if ((PreviousLine && PreviousLine->Affected) ||
- TheLine.LeadingEmptyLinesAffected) {
- formatFirstToken(*Tok, PreviousLine, TheLine.Level, LevelIndent,
- TheLine.InPPDirective);
- } else {
- Whitespaces->addUntouchableToken(*Tok, TheLine.InPPDirective);
- }
- }
-
- if (static_cast<int>(LevelIndent) - Offset >= 0)
- LevelIndent -= Offset;
- if (Tok->isNot(tok::comment) && !TheLine.InPPDirective)
- IndentForLevel[TheLine.Level] = LevelIndent;
- } else if (!DryRun) {
- Whitespaces->addUntouchableToken(*Tok, TheLine.InPPDirective);
- }
- }
- }
- if (!DryRun) {
- for (FormatToken *Tok = TheLine.First; Tok; Tok = Tok->Next) {
- Tok->Finalized = true;
- }
- }
- PreviousLine = *I;
- }
- PenaltyCache[CacheKey] = Penalty;
- return Penalty;
- }
-
-private:
- /// \brief Formats an \c AnnotatedLine and returns the penalty.
- ///
- /// If \p DryRun is \c false, directly applies the changes.
- unsigned format(const AnnotatedLine &Line, unsigned FirstIndent,
- bool DryRun) {
- LineState State = Indenter->getInitialState(FirstIndent, &Line, DryRun);
-
- // If the ObjC method declaration does not fit on a line, we should format
- // it with one arg per line.
- if (State.Line->Type == LT_ObjCMethodDecl)
- State.Stack.back().BreakBeforeParameter = true;
-
- // Find best solution in solution space.
- return analyzeSolutionSpace(State, DryRun);
- }
-
- /// \brief An edge in the solution space from \c Previous->State to \c State,
- /// inserting a newline dependent on the \c NewLine.
- struct StateNode {
- StateNode(const LineState &State, bool NewLine, StateNode *Previous)
- : State(State), NewLine(NewLine), Previous(Previous) {}
- LineState State;
- bool NewLine;
- StateNode *Previous;
- };
-
-  /// \brief A pair of <penalty, count> that is used as the priority in the BFS.
- ///
- /// In case of equal penalties, we want to prefer states that were inserted
- /// first. During state generation we make sure that we insert states first
- /// that break the line as late as possible.
- typedef std::pair<unsigned, unsigned> OrderedPenalty;
-
- /// \brief An item in the prioritized BFS search queue. The \c StateNode's
- /// \c State has the given \c OrderedPenalty.
- typedef std::pair<OrderedPenalty, StateNode *> QueueItem;
-
- /// \brief The BFS queue type.
- typedef std::priority_queue<QueueItem, std::vector<QueueItem>,
- std::greater<QueueItem> > QueueType;
-
-  /// \brief Get the offset of the line relative to the level.
- ///
- /// For example, 'public:' labels in classes are offset by 1 or 2
- /// characters to the left from their level.
- int getIndentOffset(const FormatToken &RootToken) {
- if (RootToken.isAccessSpecifier(false) || RootToken.isObjCAccessSpecifier())
- return Style.AccessModifierOffset;
- return 0;
- }
-
- /// \brief Add a new line and the required indent before the first Token
- /// of the \c UnwrappedLine if there was no structural parsing error.
- void formatFirstToken(FormatToken &RootToken,
- const AnnotatedLine *PreviousLine, unsigned IndentLevel,
- unsigned Indent, bool InPPDirective) {
- unsigned Newlines =
- std::min(RootToken.NewlinesBefore, Style.MaxEmptyLinesToKeep + 1);
- // Remove empty lines before "}" where applicable.
- if (RootToken.is(tok::r_brace) &&
- (!RootToken.Next ||
- (RootToken.Next->is(tok::semi) && !RootToken.Next->Next)))
- Newlines = std::min(Newlines, 1u);
- if (Newlines == 0 && !RootToken.IsFirst)
- Newlines = 1;
- if (RootToken.IsFirst && !RootToken.HasUnescapedNewline)
- Newlines = 0;
-
- // Remove empty lines after "{".
- if (!Style.KeepEmptyLinesAtTheStartOfBlocks && PreviousLine &&
- PreviousLine->Last->is(tok::l_brace) &&
- PreviousLine->First->isNot(tok::kw_namespace))
- Newlines = 1;
-
- // Insert extra new line before access specifiers.
- if (PreviousLine && PreviousLine->Last->isOneOf(tok::semi, tok::r_brace) &&
- RootToken.isAccessSpecifier() && RootToken.NewlinesBefore == 1)
- ++Newlines;
-
- // Remove empty lines after access specifiers.
- if (PreviousLine && PreviousLine->First->isAccessSpecifier())
- Newlines = std::min(1u, Newlines);
-
- Whitespaces->replaceWhitespace(RootToken, Newlines, IndentLevel, Indent,
- Indent, InPPDirective &&
- !RootToken.HasUnescapedNewline);
- }
-
- /// \brief Get the indent of \p Level from \p IndentForLevel.
- ///
- /// \p IndentForLevel must contain the indent for the level \c l
- /// at \p IndentForLevel[l], or a value < 0 if the indent for
- /// that level is unknown.
- unsigned getIndent(const std::vector<int> IndentForLevel, unsigned Level) {
- if (IndentForLevel[Level] != -1)
- return IndentForLevel[Level];
- if (Level == 0)
- return 0;
- return getIndent(IndentForLevel, Level - 1) + Style.IndentWidth;
- }
-
- void join(AnnotatedLine &A, const AnnotatedLine &B) {
- assert(!A.Last->Next);
- assert(!B.First->Previous);
- if (B.Affected)
- A.Affected = true;
- A.Last->Next = B.First;
- B.First->Previous = A.Last;
- B.First->CanBreakBefore = true;
- unsigned LengthA = A.Last->TotalLength + B.First->SpacesRequiredBefore;
- for (FormatToken *Tok = B.First; Tok; Tok = Tok->Next) {
- Tok->TotalLength += LengthA;
- A.Last = Tok;
- }
- }
-
- unsigned getColumnLimit(bool InPPDirective) const {
- // In preprocessor directives reserve two chars for trailing " \"
- return Style.ColumnLimit - (InPPDirective ? 2 : 0);
- }
-
- struct CompareLineStatePointers {
- bool operator()(LineState *obj1, LineState *obj2) const {
- return *obj1 < *obj2;
- }
- };
-
- /// \brief Analyze the entire solution space starting from \p InitialState.
- ///
- /// This implements a variant of Dijkstra's algorithm on the graph that spans
- /// the solution space (\c LineStates are the nodes). The algorithm tries to
- /// find the shortest path (the one with lowest penalty) from \p InitialState
- /// to a state where all tokens are placed. Returns the penalty.
- ///
- /// If \p DryRun is \c false, directly applies the changes.
- unsigned analyzeSolutionSpace(LineState &InitialState, bool DryRun = false) {
- std::set<LineState *, CompareLineStatePointers> Seen;
-
- // Increasing count of \c StateNode items we have created. This is used to
- // create a deterministic order independent of the container.
- unsigned Count = 0;
- QueueType Queue;
-
- // Insert start element into queue.
- StateNode *Node =
- new (Allocator.Allocate()) StateNode(InitialState, false, nullptr);
- Queue.push(QueueItem(OrderedPenalty(0, Count), Node));
- ++Count;
-
- unsigned Penalty = 0;
-
- // While not empty, take first element and follow edges.
- while (!Queue.empty()) {
- Penalty = Queue.top().first.first;
- StateNode *Node = Queue.top().second;
- if (!Node->State.NextToken) {
- DEBUG(llvm::dbgs() << "\n---\nPenalty for line: " << Penalty << "\n");
- break;
- }
- Queue.pop();
-
- // Cut off the analysis of certain solutions if the analysis gets too
- // complex. See description of IgnoreStackForComparison.
- if (Count > 10000)
- Node->State.IgnoreStackForComparison = true;
-
- if (!Seen.insert(&Node->State).second)
- // State already examined with lower penalty.
- continue;
-
- FormatDecision LastFormat = Node->State.NextToken->Decision;
- if (LastFormat == FD_Unformatted || LastFormat == FD_Continue)
- addNextStateToQueue(Penalty, Node, /*NewLine=*/false, &Count, &Queue);
- if (LastFormat == FD_Unformatted || LastFormat == FD_Break)
- addNextStateToQueue(Penalty, Node, /*NewLine=*/true, &Count, &Queue);
- }
-
- if (Queue.empty()) {
- // We were unable to find a solution, do nothing.
- // FIXME: Add diagnostic?
- DEBUG(llvm::dbgs() << "Could not find a solution.\n");
- return 0;
- }
-
- // Reconstruct the solution.
- if (!DryRun)
- reconstructPath(InitialState, Queue.top().second);
-
- DEBUG(llvm::dbgs() << "Total number of analyzed states: " << Count << "\n");
- DEBUG(llvm::dbgs() << "---\n");
-
- return Penalty;
- }
-
- void reconstructPath(LineState &State, StateNode *Current) {
- std::deque<StateNode *> Path;
- // We do not need a break before the initial token.
- while (Current->Previous) {
- Path.push_front(Current);
- Current = Current->Previous;
- }
- for (std::deque<StateNode *>::iterator I = Path.begin(), E = Path.end();
- I != E; ++I) {
- unsigned Penalty = 0;
- formatChildren(State, (*I)->NewLine, /*DryRun=*/false, Penalty);
- Penalty += Indenter->addTokenToState(State, (*I)->NewLine, false);
-
- DEBUG({
- if ((*I)->NewLine) {
- llvm::dbgs() << "Penalty for placing "
- << (*I)->Previous->State.NextToken->Tok.getName() << ": "
- << Penalty << "\n";
- }
- });
- }
- }
-
- /// \brief Add the following state to the analysis queue \c Queue.
- ///
- /// Assume the current state is \p PreviousNode and has been reached with a
- /// penalty of \p Penalty. Insert a line break if \p NewLine is \c true.
- void addNextStateToQueue(unsigned Penalty, StateNode *PreviousNode,
- bool NewLine, unsigned *Count, QueueType *Queue) {
- if (NewLine && !Indenter->canBreak(PreviousNode->State))
- return;
- if (!NewLine && Indenter->mustBreak(PreviousNode->State))
- return;
-
- StateNode *Node = new (Allocator.Allocate())
- StateNode(PreviousNode->State, NewLine, PreviousNode);
- if (!formatChildren(Node->State, NewLine, /*DryRun=*/true, Penalty))
- return;
-
- Penalty += Indenter->addTokenToState(Node->State, NewLine, true);
-
- Queue->push(QueueItem(OrderedPenalty(Penalty, *Count), Node));
- ++(*Count);
- }
-
- /// \brief If the \p State's next token is an r_brace closing a nested block,
- /// format the nested block before it.
- ///
- /// Returns \c true if all children could be placed successfully and adapts
- /// \p Penalty as well as \p State. If \p DryRun is false, also directly
- /// creates changes using \c Whitespaces.
- ///
- /// The crucial idea here is that children always get formatted upon
- /// encountering the closing brace right after the nested block. Now, if we
- /// are currently trying to keep the "}" on the same line (i.e. \p NewLine is
- /// \c false), the entire block has to be kept on the same line (which is only
-  /// possible if it fits on the line, only contains a single statement, etc.).
- ///
- /// If \p NewLine is true, we format the nested block on separate lines, i.e.
- /// break after the "{", format all lines with correct indentation and the put
- /// the closing "}" on yet another new line.
- ///
- /// This enables us to keep the simple structure of the
- /// \c UnwrappedLineFormatter, where we only have two options for each token:
- /// break or don't break.
- bool formatChildren(LineState &State, bool NewLine, bool DryRun,
- unsigned &Penalty) {
- FormatToken &Previous = *State.NextToken->Previous;
- const FormatToken *LBrace = State.NextToken->getPreviousNonComment();
- if (!LBrace || LBrace->isNot(tok::l_brace) ||
- LBrace->BlockKind != BK_Block || Previous.Children.size() == 0)
- // The previous token does not open a block. Nothing to do. We don't
- // assert so that we can simply call this function for all tokens.
- return true;
-
- if (NewLine) {
- int AdditionalIndent =
- State.FirstIndent - State.Line->Level * Style.IndentWidth;
- if (State.Stack.size() < 2 ||
- !State.Stack[State.Stack.size() - 2].JSFunctionInlined) {
- AdditionalIndent = State.Stack.back().Indent -
- Previous.Children[0]->Level * Style.IndentWidth;
- }
-
- Penalty += format(Previous.Children, DryRun, AdditionalIndent,
- /*FixBadIndentation=*/true);
- return true;
- }
-
- // Cannot merge multiple statements into a single line.
- if (Previous.Children.size() > 1)
- return false;
-
- // Cannot merge into one line if this line ends on a comment.
- if (Previous.is(tok::comment))
- return false;
-
- // We can't put the closing "}" on a line with a trailing comment.
- if (Previous.Children[0]->Last->isTrailingComment())
- return false;
-
- // If the child line exceeds the column limit, we wouldn't want to merge it.
- // We add +2 for the trailing " }".
- if (Style.ColumnLimit > 0 &&
- Previous.Children[0]->Last->TotalLength + State.Column + 2 >
- Style.ColumnLimit)
- return false;
-
- if (!DryRun) {
- Whitespaces->replaceWhitespace(
- *Previous.Children[0]->First,
- /*Newlines=*/0, /*IndentLevel=*/0, /*Spaces=*/1,
- /*StartOfTokenColumn=*/State.Column, State.Line->InPPDirective);
- }
- Penalty += format(*Previous.Children[0], State.Column + 1, DryRun);
-
- State.Column += 1 + Previous.Children[0]->Last->TotalLength;
- return true;
- }
-
- ContinuationIndenter *Indenter;
- WhitespaceManager *Whitespaces;
- FormatStyle Style;
- LineJoiner Joiner;
-
- llvm::SpecificBumpPtrAllocator<StateNode> Allocator;
-
- // Cache to store the penalty of formatting a vector of AnnotatedLines
- // starting from a specific additional offset. Improves performance if there
- // are many nested blocks.
- std::map<std::pair<const SmallVectorImpl<AnnotatedLine *> *, unsigned>,
- unsigned> PenaltyCache;
-};
-
class FormatTokenLexer {
public:
- FormatTokenLexer(Lexer &Lex, SourceManager &SourceMgr, FormatStyle &Style,
+ FormatTokenLexer(SourceManager &SourceMgr, FileID ID, FormatStyle &Style,
encoding::Encoding Encoding)
: FormatTok(nullptr), IsFirstToken(true), GreaterStashed(false),
- Column(0), TrailingWhitespace(0), Lex(Lex), SourceMgr(SourceMgr),
- Style(Style), IdentTable(getFormattingLangOpts()), Encoding(Encoding),
- FirstInLineIndex(0) {
- Lex.SetKeepWhitespaceMode(true);
+ Column(0), TrailingWhitespace(0), SourceMgr(SourceMgr), ID(ID),
+ Style(Style), IdentTable(getFormattingLangOpts(Style)),
+ Keywords(IdentTable), Encoding(Encoding), FirstInLineIndex(0),
+ FormattingDisabled(false) {
+ Lex.reset(new Lexer(ID, SourceMgr.getBuffer(ID), SourceMgr,
+ getFormattingLangOpts(Style)));
+ Lex->SetKeepWhitespaceMode(true);
for (const std::string &ForEachMacro : Style.ForEachMacros)
ForEachMacros.push_back(&IdentTable.get(ForEachMacro));
@@ -1290,7 +625,7 @@ public:
return Tokens;
}
- IdentifierTable &getIdentTable() { return IdentTable; }
+ const AdditionalKeywords &getKeywords() { return Keywords; }
private:
void tryMergePreviousTokens() {
@@ -1300,10 +635,10 @@ private:
return;
if (Style.Language == FormatStyle::LK_JavaScript) {
- if (tryMergeEscapeSequence())
- return;
if (tryMergeJSRegexLiteral())
return;
+ if (tryMergeEscapeSequence())
+ return;
static tok::TokenKind JSIdentity[] = { tok::equalequal, tok::equal };
static tok::TokenKind JSNotIdentity[] = { tok::exclaimequal, tok::equal };
@@ -1351,14 +686,14 @@ private:
if (Tokens.size() < 2)
return false;
FormatToken *Previous = Tokens[Tokens.size() - 2];
- if (Previous->isNot(tok::unknown) || Previous->TokenText != "\\" ||
- Tokens.back()->NewlinesBefore != 0)
+ if (Previous->isNot(tok::unknown) || Previous->TokenText != "\\")
return false;
- Previous->ColumnWidth += Tokens.back()->ColumnWidth;
+ ++Previous->ColumnWidth;
StringRef Text = Previous->TokenText;
- Previous->TokenText =
- StringRef(Text.data(), Text.size() + Tokens.back()->TokenText.size());
+ Previous->TokenText = StringRef(Text.data(), Text.size() + 1);
+ resetLexer(SourceMgr.getFileOffset(Tokens.back()->Tok.getLocation()) + 1);
Tokens.resize(Tokens.size() - 1);
+ Column = Previous->OriginalColumn + Previous->ColumnWidth;
return true;
}
@@ -1368,9 +703,18 @@ private:
// "(;,{}![:?", a binary operator or 'return', as those cannot be followed by
// a division.
bool tryMergeJSRegexLiteral() {
- if (Tokens.size() < 2 || Tokens.back()->isNot(tok::slash) ||
- (Tokens[Tokens.size() - 2]->is(tok::unknown) &&
- Tokens[Tokens.size() - 2]->TokenText == "\\"))
+ if (Tokens.size() < 2)
+ return false;
+ // If a regex literal ends in "\//", this gets represented by an unknown
+ // token "\" and a comment.
+ bool MightEndWithEscapedSlash =
+ Tokens.back()->is(tok::comment) &&
+ Tokens.back()->TokenText.startswith("//") &&
+ Tokens[Tokens.size() - 2]->TokenText == "\\";
+ if (!MightEndWithEscapedSlash &&
+ (Tokens.back()->isNot(tok::slash) ||
+ (Tokens[Tokens.size() - 2]->is(tok::unknown) &&
+ Tokens[Tokens.size() - 2]->TokenText == "\\")))
return false;
unsigned TokenCount = 0;
unsigned LastColumn = Tokens.back()->OriginalColumn;
@@ -1381,6 +725,12 @@ private:
tok::exclaim, tok::l_square, tok::colon, tok::comma,
tok::question, tok::kw_return) ||
I[1]->isBinaryOperator())) {
+ if (MightEndWithEscapedSlash) {
+ // This regex literal ends in '\//'. Skip past the '//' of the last
+ // token and re-start lexing from there.
+ SourceLocation Loc = Tokens.back()->Tok.getLocation();
+ resetLexer(SourceMgr.getFileOffset(Loc) + 2);
+ }
Tokens.resize(Tokens.size() - TokenCount);
Tokens.back()->Tok.setKind(tok::unknown);
Tokens.back()->Type = TT_RegexLiteral;
@@ -1544,7 +894,6 @@ private:
Column += Style.TabWidth - Column % Style.TabWidth;
break;
case '\\':
- ++Column;
if (i + 1 == e || (FormatTok->TokenText[i + 1] != '\r' &&
FormatTok->TokenText[i + 1] != '\n'))
FormatTok->Type = TT_ImplicitStringLiteral;
@@ -1556,7 +905,7 @@ private:
}
}
- if (FormatTok->Type == TT_ImplicitStringLiteral)
+ if (FormatTok->is(TT_ImplicitStringLiteral))
break;
WhitespaceLength += FormatTok->Tok.getLength();
@@ -1590,6 +939,11 @@ private:
IdentifierInfo &Info = IdentTable.get(FormatTok->TokenText);
FormatTok->Tok.setIdentifierInfo(&Info);
FormatTok->Tok.setKind(Info.getTokenID());
+ if (Style.Language == FormatStyle::LK_Java &&
+ FormatTok->isOneOf(tok::kw_struct, tok::kw_union, tok::kw_delete)) {
+ FormatTok->Tok.setKind(tok::identifier);
+ FormatTok->Tok.setIdentifierInfo(nullptr);
+ }
} else if (FormatTok->Tok.is(tok::greatergreater)) {
FormatTok->Tok.setKind(tok::greater);
FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
@@ -1633,10 +987,12 @@ private:
bool GreaterStashed;
unsigned Column;
unsigned TrailingWhitespace;
- Lexer &Lex;
+ std::unique_ptr<Lexer> Lex;
SourceManager &SourceMgr;
+ FileID ID;
FormatStyle &Style;
IdentifierTable IdentTable;
+ AdditionalKeywords Keywords;
encoding::Encoding Encoding;
llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
// Index (in 'Tokens') of the last token that starts a new line.
@@ -1644,8 +1000,10 @@ private:
SmallVector<FormatToken *, 16> Tokens;
SmallVector<IdentifierInfo *, 8> ForEachMacros;
+ bool FormattingDisabled;
+
void readRawToken(FormatToken &Tok) {
- Lex.LexFromRawLexer(Tok.Tok);
+ Lex->LexFromRawLexer(Tok.Tok);
Tok.TokenText = StringRef(SourceMgr.getCharacterData(Tok.Tok.getLocation()),
Tok.Tok.getLength());
// For formatting, treat unterminated string literals like normal string
@@ -1659,6 +1017,26 @@ private:
Tok.Tok.setKind(tok::char_constant);
}
}
+
+ if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format on" ||
+ Tok.TokenText == "/* clang-format on */")) {
+ FormattingDisabled = false;
+ }
+
+ Tok.Finalized = FormattingDisabled;
+
+ if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format off" ||
+ Tok.TokenText == "/* clang-format off */")) {
+ FormattingDisabled = true;
+ }
+ }
+
+ void resetLexer(unsigned Offset) {
+ StringRef Buffer = SourceMgr.getBufferData(ID);
+ Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID),
+ getFormattingLangOpts(Style), Buffer.begin(),
+ Buffer.begin() + Offset, Buffer.end()));
+ Lex->SetKeepWhitespaceMode(true);
}
};
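
readRawToken() above recognizes four exact comment strings and marks every token lexed while formatting is disabled as Finalized. A small usage sketch (ordinary user code, not part of the patch) of how a hand-aligned region is fenced off in a source file:

// clang-format leaves everything between the two markers untouched, so the
// manually aligned initializer below survives reformatting.
#include <map>
#include <string>

// clang-format off
static const std::map<std::string, int> kTable = {
    {"alpha",   1},
    {"beta",   10},
    {"gamma", 100},
};
// clang-format on
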
@@ -1666,6 +1044,8 @@ static StringRef getLanguageName(FormatStyle::LanguageKind Language) {
switch (Language) {
case FormatStyle::LK_Cpp:
return "C++";
+ case FormatStyle::LK_Java:
+ return "Java";
case FormatStyle::LK_JavaScript:
return "JavaScript";
case FormatStyle::LK_Proto:
@@ -1677,12 +1057,13 @@ static StringRef getLanguageName(FormatStyle::LanguageKind Language) {
class Formatter : public UnwrappedLineConsumer {
public:
- Formatter(const FormatStyle &Style, Lexer &Lex, SourceManager &SourceMgr,
- const std::vector<CharSourceRange> &Ranges)
- : Style(Style), Lex(Lex), SourceMgr(SourceMgr),
- Whitespaces(SourceMgr, Style, inputUsesCRLF(Lex.getBuffer())),
+ Formatter(const FormatStyle &Style, SourceManager &SourceMgr, FileID ID,
+ ArrayRef<CharSourceRange> Ranges)
+ : Style(Style), ID(ID), SourceMgr(SourceMgr),
+ Whitespaces(SourceMgr, Style,
+ inputUsesCRLF(SourceMgr.getBufferData(ID))),
Ranges(Ranges.begin(), Ranges.end()), UnwrappedLines(1),
- Encoding(encoding::detectEncoding(Lex.getBuffer())) {
+ Encoding(encoding::detectEncoding(SourceMgr.getBufferData(ID))) {
DEBUG(llvm::dbgs() << "File encoding: "
<< (Encoding == encoding::Encoding_UTF8 ? "UTF8"
: "unknown")
@@ -1693,9 +1074,10 @@ public:
tooling::Replacements format() {
tooling::Replacements Result;
- FormatTokenLexer Tokens(Lex, SourceMgr, Style, Encoding);
+ FormatTokenLexer Tokens(SourceMgr, ID, Style, Encoding);
- UnwrappedLineParser Parser(Style, Tokens.lex(), *this);
+ UnwrappedLineParser Parser(Style, Tokens.getKeywords(), Tokens.lex(),
+ *this);
bool StructuralError = Parser.parse();
assert(UnwrappedLines.rbegin()->empty());
for (unsigned Run = 0, RunE = UnwrappedLines.size(); Run + 1 != RunE;
@@ -1726,7 +1108,7 @@ public:
tooling::Replacements format(SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
bool StructuralError, FormatTokenLexer &Tokens) {
- TokenAnnotator Annotator(Style, Tokens.getIdentTable().get("in"));
+ TokenAnnotator Annotator(Style, Tokens.getKeywords());
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
Annotator.annotate(*AnnotatedLines[i]);
}
@@ -1737,7 +1119,8 @@ public:
computeAffectedLines(AnnotatedLines.begin(), AnnotatedLines.end());
Annotator.setCommentLineLevels(AnnotatedLines);
- ContinuationIndenter Indenter(Style, SourceMgr, Whitespaces, Encoding,
+ ContinuationIndenter Indenter(Style, Tokens.getKeywords(), SourceMgr,
+ Whitespaces, Encoding,
BinPackInconclusiveFunctions);
UnwrappedLineFormatter Formatter(&Indenter, &Whitespaces, Style);
Formatter.format(AnnotatedLines, /*DryRun=*/false);
@@ -1852,8 +1235,7 @@ private:
if (!IncludeLeadingNewlines)
Start = Start.getLocWithOffset(First.LastNewlineOffset);
SourceLocation End = Last.getStartOfNonWhitespace();
- if (Last.TokenText.size() > 0)
- End = End.getLocWithOffset(Last.TokenText.size() - 1);
+ End = End.getLocWithOffset(Last.TokenText.size());
CharSourceRange Range = CharSourceRange::getCharRange(Start, End);
return affectsCharSourceRange(Range);
}
@@ -1895,7 +1277,7 @@ private:
continue;
FormatToken *Tok = AnnotatedLines[i]->First->Next;
while (Tok->Next) {
- if (Tok->Type == TT_PointerOrReference) {
+ if (Tok->is(TT_PointerOrReference)) {
bool SpacesBefore =
Tok->WhitespaceRange.getBegin() != Tok->WhitespaceRange.getEnd();
bool SpacesAfter = Tok->Next->WhitespaceRange.getBegin() !=
@@ -1907,11 +1289,10 @@ private:
}
if (Tok->WhitespaceRange.getBegin() == Tok->WhitespaceRange.getEnd()) {
- if (Tok->is(tok::coloncolon) &&
- Tok->Previous->Type == TT_TemplateOpener)
+ if (Tok->is(tok::coloncolon) && Tok->Previous->is(TT_TemplateOpener))
HasCpp03IncompatibleFormat = true;
- if (Tok->Type == TT_TemplateCloser &&
- Tok->Previous->Type == TT_TemplateCloser)
+ if (Tok->is(TT_TemplateCloser) &&
+ Tok->Previous->is(TT_TemplateCloser))
HasCpp03IncompatibleFormat = true;
}
@@ -1947,7 +1328,7 @@ private:
}
FormatStyle Style;
- Lexer &Lex;
+ FileID ID;
SourceManager &SourceMgr;
WhitespaceManager Whitespaces;
SmallVector<CharSourceRange, 8> Ranges;
@@ -1961,49 +1342,59 @@ private:
tooling::Replacements reformat(const FormatStyle &Style, Lexer &Lex,
SourceManager &SourceMgr,
- std::vector<CharSourceRange> Ranges) {
- if (Style.DisableFormat) {
- tooling::Replacements EmptyResult;
- return EmptyResult;
- }
+ ArrayRef<CharSourceRange> Ranges) {
+ if (Style.DisableFormat)
+ return tooling::Replacements();
+ return reformat(Style, SourceMgr,
+ SourceMgr.getFileID(Lex.getSourceLocation()), Ranges);
+}
- Formatter formatter(Style, Lex, SourceMgr, Ranges);
+tooling::Replacements reformat(const FormatStyle &Style,
+ SourceManager &SourceMgr, FileID ID,
+ ArrayRef<CharSourceRange> Ranges) {
+ if (Style.DisableFormat)
+ return tooling::Replacements();
+ Formatter formatter(Style, SourceMgr, ID, Ranges);
return formatter.format();
}
tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
- std::vector<tooling::Range> Ranges,
+ ArrayRef<tooling::Range> Ranges,
StringRef FileName) {
+ if (Style.DisableFormat)
+ return tooling::Replacements();
+
FileManager Files((FileSystemOptions()));
DiagnosticsEngine Diagnostics(
IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
new DiagnosticOptions);
SourceManager SourceMgr(Diagnostics, Files);
- llvm::MemoryBuffer *Buf = llvm::MemoryBuffer::getMemBuffer(Code, FileName);
+ std::unique_ptr<llvm::MemoryBuffer> Buf =
+ llvm::MemoryBuffer::getMemBuffer(Code, FileName);
const clang::FileEntry *Entry =
Files.getVirtualFile(FileName, Buf->getBufferSize(), 0);
- SourceMgr.overrideFileContents(Entry, Buf);
+ SourceMgr.overrideFileContents(Entry, std::move(Buf));
FileID ID =
SourceMgr.createFileID(Entry, SourceLocation(), clang::SrcMgr::C_User);
- Lexer Lex(ID, SourceMgr.getBuffer(ID), SourceMgr,
- getFormattingLangOpts(Style.Standard));
SourceLocation StartOfFile = SourceMgr.getLocForStartOfFile(ID);
std::vector<CharSourceRange> CharRanges;
- for (unsigned i = 0, e = Ranges.size(); i != e; ++i) {
- SourceLocation Start = StartOfFile.getLocWithOffset(Ranges[i].getOffset());
- SourceLocation End = Start.getLocWithOffset(Ranges[i].getLength());
+ for (const tooling::Range &Range : Ranges) {
+ SourceLocation Start = StartOfFile.getLocWithOffset(Range.getOffset());
+ SourceLocation End = Start.getLocWithOffset(Range.getLength());
CharRanges.push_back(CharSourceRange::getCharRange(Start, End));
}
- return reformat(Style, Lex, SourceMgr, CharRanges);
+ return reformat(Style, SourceMgr, ID, CharRanges);
}
-LangOptions getFormattingLangOpts(FormatStyle::LanguageStandard Standard) {
+LangOptions getFormattingLangOpts(const FormatStyle &Style) {
LangOptions LangOpts;
LangOpts.CPlusPlus = 1;
- LangOpts.CPlusPlus11 = Standard == FormatStyle::LS_Cpp03 ? 0 : 1;
- LangOpts.CPlusPlus1y = Standard == FormatStyle::LS_Cpp03 ? 0 : 1;
+ LangOpts.CPlusPlus11 = Style.Standard == FormatStyle::LS_Cpp03 ? 0 : 1;
+ LangOpts.CPlusPlus14 = Style.Standard == FormatStyle::LS_Cpp03 ? 0 : 1;
LangOpts.LineComment = 1;
- LangOpts.CXXOperatorNames = 1;
+ bool AlternativeOperators = Style.Language != FormatStyle::LK_JavaScript &&
+ Style.Language != FormatStyle::LK_Java;
+ LangOpts.CXXOperatorNames = AlternativeOperators ? 1 : 0;
LangOpts.Bool = 1;
LangOpts.ObjC1 = 1;
LangOpts.ObjC2 = 1;
@@ -2022,7 +1413,9 @@ const char *StyleOptionHelpDescription =
" -style=\"{BasedOnStyle: llvm, IndentWidth: 8}\"";
static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
- if (FileName.endswith_lower(".js")) {
+ if (FileName.endswith(".java")) {
+ return FormatStyle::LK_Java;
+ } else if (FileName.endswith_lower(".js")) {
return FormatStyle::LK_JavaScript;
} else if (FileName.endswith_lower(".proto") ||
FileName.endswith_lower(".protodevel")) {
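
With the Lexer-based entry point now delegating to the FileID overload, out-of-tree callers are expected to use the StringRef-based reformat() shown above. A hedged end-to-end sketch against the clang 3.6 API follows; the style is picked explicitly here, and tooling::Replacement is assumed to live in clang/Tooling/Core/Replacement.h in this release.

// Illustration only: format a one-line Java snippet with the Google Java
// preset through the new StringRef-based reformat() overload.
#include "clang/Format/Format.h"
#include "clang/Tooling/Core/Replacement.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

int main() {
  llvm::StringRef Code = "class C{void f(){if(true)return;}}";
  clang::format::FormatStyle Style =
      clang::format::getGoogleStyle(clang::format::FormatStyle::LK_Java);
  std::vector<clang::tooling::Range> Ranges(
      1, clang::tooling::Range(0, Code.size()));
  clang::tooling::Replacements Replaces =
      clang::format::reformat(Style, Code, Ranges, "Example.java");
  llvm::outs() << clang::tooling::applyAllReplacements(Code, Replaces) << "\n";
  return 0;
}
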
diff --git a/lib/Format/FormatToken.cpp b/lib/Format/FormatToken.cpp
index c91d25f46de1..badb3a39c82c 100644
--- a/lib/Format/FormatToken.cpp
+++ b/lib/Format/FormatToken.cpp
@@ -131,9 +131,15 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
if (!Token->MatchingParen || Token->isNot(tok::l_brace))
return;
- // In C++11 braced list style, we should not format in columns unless we allow
- // bin-packing of function parameters.
- if (Style.Cpp11BracedListStyle && !Style.BinPackParameters)
+ // In C++11 braced list style, we should not format in columns unless they
+ // have many items (20 or more) or we allow bin-packing of function
+ // parameters.
+ if (Style.Cpp11BracedListStyle && !Style.BinPackParameters &&
+ Commas.size() < 19)
+ return;
+
+ // Column format doesn't really make sense if we don't align after brackets.
+ if (!Style.AlignAfterOpenBracket)
return;
FormatToken *ItemBegin = Token->Next;
@@ -143,6 +149,9 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// trailing comments which are otherwise ignored for column alignment.
SmallVector<unsigned, 8> EndOfLineItemLength;
+ unsigned MinItemLength = Style.ColumnLimit;
+ unsigned MaxItemLength = 0;
+
for (unsigned i = 0, e = Commas.size() + 1; i != e; ++i) {
// Skip comments on their own line.
while (ItemBegin->HasUnescapedNewline && ItemBegin->isTrailingComment())
@@ -169,6 +178,9 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
ItemEnd = Commas[i];
// The comma is counted as part of the item when calculating the length.
ItemLengths.push_back(CodePointsBetween(ItemBegin, ItemEnd));
+ MinItemLength = std::min(MinItemLength, ItemLengths.back());
+ MaxItemLength = std::max(MaxItemLength, ItemLengths.back());
+
     // Consume trailing comments so they are included in EndOfLineItemLength.
if (ItemEnd->Next && !ItemEnd->Next->HasUnescapedNewline &&
ItemEnd->Next->isTrailingComment())
@@ -184,8 +196,10 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// If this doesn't have a nested list, we require at least 6 elements in order
   // to create a column layout. If it has a nested list, column layout ensures one
- // list element per line.
- if (HasNestedBracedList || Commas.size() < 5 || Token->NestingLevel != 0)
+ // list element per line. If the difference between the shortest and longest
+ // element is too large, column layout would create too much whitespace.
+ if (HasNestedBracedList || Commas.size() < 5 || Token->NestingLevel != 0 ||
+ MaxItemLength - MinItemLength > 10)
return;
// We can never place more than ColumnLimit / 3 items in a row (because of the
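
The new guards in precomputeFormattingInfos() are spread over two early returns; condensed into one hypothetical helper (a paraphrase of the logic above, not code from the patch), the thresholds read as follows.

// mightUseColumnLayout() is an illustrative name; it mirrors the conditions
// added above for deciding whether a braced list gets a column layout at all.
struct StyleBits {           // only the options the heuristic consults
  bool Cpp11BracedListStyle;
  bool BinPackParameters;
  bool AlignAfterOpenBracket;
};

bool mightUseColumnLayout(const StyleBits &Style, unsigned Commas,
                          unsigned MinItemLength, unsigned MaxItemLength,
                          bool HasNestedBracedList, unsigned NestingLevel) {
  // Small C++11 braced lists (fewer than 20 items) are left alone unless
  // function parameters may be bin-packed.
  if (Style.Cpp11BracedListStyle && !Style.BinPackParameters && Commas < 19)
    return false;
  // Column format only makes sense when we align after the opening bracket.
  if (!Style.AlignAfterOpenBracket)
    return false;
  // At least 6 elements, top nesting level, no nested braced lists, and item
  // widths within 10 columns of each other.
  return !HasNestedBracedList && Commas >= 5 && NestingLevel == 0 &&
         MaxItemLength - MinItemLength <= 10;
}

int main() {
  StyleBits S = {true, false, true};
  // 21 items of similar width at the top level: column layout is considered.
  return mightUseColumnLayout(S, 20, 6, 12, false, 0) ? 0 : 1;
}
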
diff --git a/lib/Format/FormatToken.h b/lib/Format/FormatToken.h
index c376c5009559..4811e02dd228 100644
--- a/lib/Format/FormatToken.h
+++ b/lib/Format/FormatToken.h
@@ -13,9 +13,10 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FORMAT_FORMAT_TOKEN_H
-#define LLVM_CLANG_FORMAT_FORMAT_TOKEN_H
+#ifndef LLVM_CLANG_LIB_FORMAT_FORMATTOKEN_H
+#define LLVM_CLANG_LIB_FORMAT_FORMATTOKEN_H
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Format/Format.h"
#include "clang/Lex/Lexer.h"
@@ -46,7 +47,10 @@ enum TokenType {
TT_ImplicitStringLiteral,
TT_InheritanceColon,
TT_InlineASMColon,
+ TT_JavaAnnotation,
+ TT_LambdaArrow,
TT_LambdaLSquare,
+ TT_LeadingJavaAnnotation,
TT_LineComment,
TT_ObjCBlockLBrace,
TT_ObjCBlockLParen,
@@ -267,29 +271,36 @@ struct FormatToken {
bool IsForEachMacro;
bool is(tok::TokenKind Kind) const { return Tok.is(Kind); }
-
- bool isOneOf(tok::TokenKind K1, tok::TokenKind K2) const {
+ bool is(TokenType TT) const { return Type == TT; }
+ bool is(const IdentifierInfo *II) const {
+ return II && II == Tok.getIdentifierInfo();
+ }
+ template <typename A, typename B> bool isOneOf(A K1, B K2) const {
return is(K1) || is(K2);
}
-
- bool isOneOf(tok::TokenKind K1, tok::TokenKind K2, tok::TokenKind K3) const {
+ template <typename A, typename B, typename C>
+ bool isOneOf(A K1, B K2, C K3) const {
return is(K1) || is(K2) || is(K3);
}
-
- bool isOneOf(tok::TokenKind K1, tok::TokenKind K2, tok::TokenKind K3,
- tok::TokenKind K4, tok::TokenKind K5 = tok::NUM_TOKENS,
- tok::TokenKind K6 = tok::NUM_TOKENS,
- tok::TokenKind K7 = tok::NUM_TOKENS,
- tok::TokenKind K8 = tok::NUM_TOKENS,
- tok::TokenKind K9 = tok::NUM_TOKENS,
- tok::TokenKind K10 = tok::NUM_TOKENS,
- tok::TokenKind K11 = tok::NUM_TOKENS,
- tok::TokenKind K12 = tok::NUM_TOKENS) const {
+ template <typename A, typename B, typename C, typename D>
+ bool isOneOf(A K1, B K2, C K3, D K4) const {
+ return is(K1) || is(K2) || is(K3) || is(K4);
+ }
+ template <typename A, typename B, typename C, typename D, typename E>
+ bool isOneOf(A K1, B K2, C K3, D K4, E K5) const {
+ return is(K1) || is(K2) || is(K3) || is(K4) || is(K5);
+ }
+ template <typename T>
+ bool isOneOf(T K1, T K2, T K3, T K4, T K5, T K6, T K7 = tok::NUM_TOKENS,
+ T K8 = tok::NUM_TOKENS, T K9 = tok::NUM_TOKENS,
+ T K10 = tok::NUM_TOKENS, T K11 = tok::NUM_TOKENS,
+ T K12 = tok::NUM_TOKENS) const {
return is(K1) || is(K2) || is(K3) || is(K4) || is(K5) || is(K6) || is(K7) ||
is(K8) || is(K9) || is(K10) || is(K11) || is(K12);
}
- bool isNot(tok::TokenKind Kind) const { return Tok.isNot(Kind); }
+ template <typename T> bool isNot(T Kind) const { return !is(Kind); }
+
bool isStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); }
bool isObjCAtKeyword(tok::ObjCKeywordKind Kind) const {
@@ -313,19 +324,19 @@ struct FormatToken {
/// \brief Returns whether \p Tok is ([{ or a template opening <.
bool opensScope() const {
- return isOneOf(tok::l_paren, tok::l_brace, tok::l_square) ||
- Type == TT_TemplateOpener;
+ return isOneOf(tok::l_paren, tok::l_brace, tok::l_square,
+ TT_TemplateOpener);
}
/// \brief Returns whether \p Tok is )]} or a template closing >.
bool closesScope() const {
- return isOneOf(tok::r_paren, tok::r_brace, tok::r_square) ||
- Type == TT_TemplateCloser;
+ return isOneOf(tok::r_paren, tok::r_brace, tok::r_square,
+ TT_TemplateCloser);
}
/// \brief Returns \c true if this is a "." or "->" accessing a member.
bool isMemberAccess() const {
return isOneOf(tok::arrow, tok::period, tok::arrowstar) &&
- Type != TT_DesignatedInitializerPeriod;
+ !isOneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow);
}
bool isUnaryOperator() const {
@@ -350,7 +361,28 @@ struct FormatToken {
}
bool isTrailingComment() const {
- return is(tok::comment) && (!Next || Next->NewlinesBefore > 0);
+ return is(tok::comment) &&
+ (is(TT_LineComment) || !Next || Next->NewlinesBefore > 0);
+ }
+
+ /// \brief Returns \c true if this is a keyword that can be used
+ /// like a function call (e.g. sizeof, typeid, ...).
+ bool isFunctionLikeKeyword() const {
+ switch (Tok.getKind()) {
+ case tok::kw_throw:
+ case tok::kw_typeid:
+ case tok::kw_return:
+ case tok::kw_sizeof:
+ case tok::kw_alignof:
+ case tok::kw_alignas:
+ case tok::kw_decltype:
+ case tok::kw_noexcept:
+ case tok::kw_static_assert:
+ case tok::kw___attribute:
+ return true;
+ default:
+ return false;
+ }
}
prec::Level getPrecedence() const {
@@ -376,10 +408,10 @@ struct FormatToken {
   /// \brief Returns \c true if this token starts a block-type list, i.e. a
/// list that should be indented with a block indent.
bool opensBlockTypeList(const FormatStyle &Style) const {
- return Type == TT_ArrayInitializerLSquare ||
+ return is(TT_ArrayInitializerLSquare) ||
(is(tok::l_brace) &&
- (BlockKind == BK_Block || Type == TT_DictLiteral ||
- !Style.Cpp11BracedListStyle));
+ (BlockKind == BK_Block || is(TT_DictLiteral) ||
+ (!Style.Cpp11BracedListStyle && NestingLevel == 0)));
}
/// \brief Same as opensBlockTypeList, but for the closing token.
@@ -499,7 +531,71 @@ private:
bool HasNestedBracedList;
};
+/// \brief Encapsulates keywords that are context sensitive or for languages not
+/// properly supported by Clang's lexer.
+struct AdditionalKeywords {
+ AdditionalKeywords(IdentifierTable &IdentTable) {
+ kw_in = &IdentTable.get("in");
+ kw_CF_ENUM = &IdentTable.get("CF_ENUM");
+ kw_CF_OPTIONS = &IdentTable.get("CF_OPTIONS");
+ kw_NS_ENUM = &IdentTable.get("NS_ENUM");
+ kw_NS_OPTIONS = &IdentTable.get("NS_OPTIONS");
+
+ kw_finally = &IdentTable.get("finally");
+ kw_function = &IdentTable.get("function");
+ kw_var = &IdentTable.get("var");
+
+ kw_abstract = &IdentTable.get("abstract");
+ kw_extends = &IdentTable.get("extends");
+ kw_final = &IdentTable.get("final");
+ kw_implements = &IdentTable.get("implements");
+ kw_instanceof = &IdentTable.get("instanceof");
+ kw_interface = &IdentTable.get("interface");
+ kw_native = &IdentTable.get("native");
+ kw_package = &IdentTable.get("package");
+ kw_synchronized = &IdentTable.get("synchronized");
+ kw_throws = &IdentTable.get("throws");
+
+ kw_option = &IdentTable.get("option");
+ kw_optional = &IdentTable.get("optional");
+ kw_repeated = &IdentTable.get("repeated");
+ kw_required = &IdentTable.get("required");
+ kw_returns = &IdentTable.get("returns");
+ }
+
+ // ObjC context sensitive keywords.
+ IdentifierInfo *kw_in;
+ IdentifierInfo *kw_CF_ENUM;
+ IdentifierInfo *kw_CF_OPTIONS;
+ IdentifierInfo *kw_NS_ENUM;
+ IdentifierInfo *kw_NS_OPTIONS;
+
+ // JavaScript keywords.
+ IdentifierInfo *kw_finally;
+ IdentifierInfo *kw_function;
+ IdentifierInfo *kw_var;
+
+ // Java keywords.
+ IdentifierInfo *kw_abstract;
+ IdentifierInfo *kw_extends;
+ IdentifierInfo *kw_final;
+ IdentifierInfo *kw_implements;
+ IdentifierInfo *kw_instanceof;
+ IdentifierInfo *kw_interface;
+ IdentifierInfo *kw_native;
+ IdentifierInfo *kw_package;
+ IdentifierInfo *kw_synchronized;
+ IdentifierInfo *kw_throws;
+
+ // Proto keywords.
+ IdentifierInfo *kw_option;
+ IdentifierInfo *kw_optional;
+ IdentifierInfo *kw_repeated;
+ IdentifierInfo *kw_required;
+ IdentifierInfo *kw_returns;
+};
+
} // namespace format
} // namespace clang
-#endif // LLVM_CLANG_FORMAT_FORMAT_TOKEN_H
+#endif
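
The heterogeneous is()/isOneOf() overloads above let call sites mix tok::TokenKind, TokenType and IdentifierInfo* in a single check, which is what most of the mechanical changes in this patch rely on. A self-contained mock of the same dispatch pattern (toy types only, no clang dependencies; MockToken is a made-up name):

#include <cassert>
#include <string>

enum MockKind { k_identifier, k_l_brace, k_comment };
enum MockType { TT_Unknown, TT_LambdaLSquare };

struct MockToken {
  MockKind Kind = k_identifier;
  MockType Type = TT_Unknown;
  std::string Text;

  // One is() overload per category, exactly like the FormatToken change.
  bool is(MockKind K) const { return Kind == K; }
  bool is(MockType T) const { return Type == T; }
  bool is(const char *Keyword) const { return Text == Keyword; }
  // The templated isOneOf() and isNot() then accept any mix of the three.
  template <typename A, typename B> bool isOneOf(A K1, B K2) const {
    return is(K1) || is(K2);
  }
  template <typename T> bool isNot(T K) const { return !is(K); }
};

int main() {
  MockToken Tok;
  Tok.Text = "function";
  assert(Tok.isOneOf(k_identifier, TT_LambdaLSquare)); // mixed categories
  assert(Tok.isNot(k_comment));
  assert(Tok.is("function")); // keyword match, like Keywords.kw_function above
  return 0;
}
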
diff --git a/lib/Format/TokenAnnotator.cpp b/lib/Format/TokenAnnotator.cpp
index 017afe1a370b..4ba3f9196977 100644
--- a/lib/Format/TokenAnnotator.cpp
+++ b/lib/Format/TokenAnnotator.cpp
@@ -32,9 +32,9 @@ namespace {
class AnnotatingParser {
public:
AnnotatingParser(const FormatStyle &Style, AnnotatedLine &Line,
- IdentifierInfo &Ident_in)
- : Style(Style), Line(Line), CurrentToken(Line.First),
- KeywordVirtualFound(false), AutoFound(false), Ident_in(Ident_in) {
+ const AdditionalKeywords &Keywords)
+ : Style(Style), Line(Line), CurrentToken(Line.First), AutoFound(false),
+ Keywords(Keywords) {
Contexts.push_back(Context(tok::unknown, 1, /*IsExpression=*/false));
resetTokenMetadata(CurrentToken);
}
@@ -51,6 +51,10 @@ private:
Contexts.back().InTemplateArgument =
Left->Previous && Left->Previous->Tok.isNot(tok::kw_template);
+ if (Style.Language == FormatStyle::LK_Java &&
+ CurrentToken->is(tok::question))
+ next();
+
while (CurrentToken) {
if (CurrentToken->is(tok::greater)) {
Left->MatchingParen = CurrentToken;
@@ -59,8 +63,13 @@ private:
next();
return true;
}
+ if (CurrentToken->is(tok::question) &&
+ Style.Language == FormatStyle::LK_Java) {
+ next();
+ continue;
+ }
if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace,
- tok::question, tok::colon))
+ tok::colon, tok::question))
return false;
// If a && or || is found and interpreted as a binary operator, this set
// of angles is likely part of something like "a < b && c > d". If the
@@ -69,12 +78,8 @@ private:
// parameters.
// FIXME: This is getting out of hand, write a decent parser.
if (CurrentToken->Previous->isOneOf(tok::pipepipe, tok::ampamp) &&
- ((CurrentToken->Previous->Type == TT_BinaryOperator &&
- // Toplevel bool expressions do not make lots of sense;
- // If we're on the top level, it contains only the base context and
- // the context for the current opening angle bracket.
- Contexts.size() > 2) ||
- Contexts[Contexts.size() - 2].IsExpression) &&
+ CurrentToken->Previous->is(TT_BinaryOperator) &&
+ Contexts[Contexts.size() - 2].IsExpression &&
Line.First->isNot(tok::kw_template))
return false;
updateParameterCount(Left, CurrentToken);
@@ -109,17 +114,17 @@ private:
if (Left->Previous &&
(Left->Previous->isOneOf(tok::kw_static_assert, tok::kw_if,
tok::kw_while, tok::l_paren, tok::comma) ||
- Left->Previous->Type == TT_BinaryOperator)) {
+ Left->Previous->is(TT_BinaryOperator))) {
// static_assert, if and while usually contain expressions.
Contexts.back().IsExpression = true;
} else if (Line.InPPDirective &&
(!Left->Previous ||
- (Left->Previous->isNot(tok::identifier) &&
- Left->Previous->Type != TT_OverloadedOperator))) {
+ !Left->Previous->isOneOf(tok::identifier,
+ TT_OverloadedOperator))) {
Contexts.back().IsExpression = true;
} else if (Left->Previous && Left->Previous->is(tok::r_square) &&
Left->Previous->MatchingParen &&
- Left->Previous->MatchingParen->Type == TT_LambdaLSquare) {
+ Left->Previous->MatchingParen->is(TT_LambdaLSquare)) {
// This is a parameter list of a lambda expression.
Contexts.back().IsExpression = false;
} else if (Contexts[Contexts.size() - 2].CaretFound) {
@@ -131,6 +136,9 @@ private:
// The first argument to a foreach macro is a declaration.
Contexts.back().IsForEachMacro = true;
Contexts.back().IsExpression = false;
+ } else if (Left->Previous && Left->Previous->MatchingParen &&
+ Left->Previous->MatchingParen->is(TT_ObjCBlockLParen)) {
+ Contexts.back().IsExpression = false;
}
if (StartsObjCMethodExpr) {
@@ -160,11 +168,11 @@ private:
}
}
- if (CurrentToken->Previous->Type == TT_PointerOrReference &&
+ if (CurrentToken->Previous->is(TT_PointerOrReference) &&
CurrentToken->Previous->Previous->isOneOf(tok::l_paren,
tok::coloncolon))
MightBeFunctionType = true;
- if (CurrentToken->Previous->Type == TT_BinaryOperator)
+ if (CurrentToken->Previous->is(TT_BinaryOperator))
Contexts.back().IsExpression = true;
if (CurrentToken->is(tok::r_paren)) {
if (MightBeFunctionType && CurrentToken->Next &&
@@ -183,8 +191,12 @@ private:
}
}
- if (Left->Type == TT_AttributeParen)
+ if (Left->is(TT_AttributeParen))
CurrentToken->Type = TT_AttributeParen;
+ if (Left->Previous && Left->Previous->is(TT_JavaAnnotation))
+ CurrentToken->Type = TT_JavaAnnotation;
+ if (Left->Previous && Left->Previous->is(TT_LeadingJavaAnnotation))
+ CurrentToken->Type = TT_LeadingJavaAnnotation;
if (!HasMultipleLines)
Left->PackingKind = PPK_Inconclusive;
@@ -227,12 +239,13 @@ private:
FormatToken *Left = CurrentToken->Previous;
FormatToken *Parent = Left->getPreviousNonComment();
bool StartsObjCMethodExpr =
- Contexts.back().CanBeExpression && Left->Type != TT_LambdaLSquare &&
+ Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
CurrentToken->isNot(tok::l_brace) &&
- (!Parent || Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
- tok::kw_return, tok::kw_throw) ||
- Parent->isUnaryOperator() || Parent->Type == TT_ObjCForIn ||
- Parent->Type == TT_CastRParen ||
+ (!Parent ||
+ Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
+ tok::kw_return, tok::kw_throw) ||
+ Parent->isUnaryOperator() ||
+ Parent->isOneOf(TT_ObjCForIn, TT_CastRParen) ||
getBinOpPrecedence(Parent->Tok.getKind(), true, true) > prec::Unknown);
ScopedContextCreator ContextCreator(*this, tok::l_square, 10);
Contexts.back().IsExpression = true;
@@ -243,14 +256,14 @@ private:
Left->Type = TT_ObjCMethodExpr;
} else if (Parent && Parent->is(tok::at)) {
Left->Type = TT_ArrayInitializerLSquare;
- } else if (Left->Type == TT_Unknown) {
+ } else if (Left->is(TT_Unknown)) {
Left->Type = TT_ArraySubscriptLSquare;
}
while (CurrentToken) {
if (CurrentToken->is(tok::r_square)) {
if (CurrentToken->Next && CurrentToken->Next->is(tok::l_paren) &&
- Left->Type == TT_ObjCMethodExpr) {
+ Left->is(TT_ObjCMethodExpr)) {
// An ObjC method call is rarely followed by an open parenthesis.
// FIXME: Do we incorrectly label ":" with this?
StartsObjCMethodExpr = false;
@@ -261,7 +274,7 @@ private:
// determineStarAmpUsage() thinks that '*' '[' is allocating an
// array of pointers, but if '[' starts a selector then '*' is a
// binary operator.
- if (Parent && Parent->Type == TT_PointerOrReference)
+ if (Parent && Parent->is(TT_PointerOrReference))
Parent->Type = TT_BinaryOperator;
}
Left->MatchingParen = CurrentToken;
@@ -277,14 +290,22 @@ private:
}
if (CurrentToken->isOneOf(tok::r_paren, tok::r_brace))
return false;
- if (CurrentToken->is(tok::colon))
+ if (CurrentToken->is(tok::colon)) {
+ if (Left->is(TT_ArraySubscriptLSquare)) {
+ Left->Type = TT_ObjCMethodExpr;
+ StartsObjCMethodExpr = true;
+ Contexts.back().ColonIsObjCMethodExpr = true;
+ if (Parent && Parent->is(tok::r_paren))
+ Parent->Type = TT_CastRParen;
+ }
ColonFound = true;
+ }
if (CurrentToken->is(tok::comma) &&
Style.Language != FormatStyle::LK_Proto &&
- (Left->Type == TT_ArraySubscriptLSquare ||
- (Left->Type == TT_ObjCMethodExpr && !ColonFound)))
+ (Left->is(TT_ArraySubscriptLSquare) ||
+ (Left->is(TT_ObjCMethodExpr) && !ColonFound)))
Left->Type = TT_ArrayInitializerLSquare;
- FormatToken* Tok = CurrentToken;
+ FormatToken *Tok = CurrentToken;
if (!consumeToken())
return false;
updateParameterCount(Left, Tok);
@@ -315,11 +336,14 @@ private:
if (CurrentToken->isOneOf(tok::r_paren, tok::r_square))
return false;
updateParameterCount(Left, CurrentToken);
- if (CurrentToken->is(tok::colon) &&
- Style.Language != FormatStyle::LK_Proto) {
- if (CurrentToken->getPreviousNonComment()->is(tok::identifier))
- CurrentToken->getPreviousNonComment()->Type = TT_SelectorName;
- Left->Type = TT_DictLiteral;
+ if (CurrentToken->isOneOf(tok::colon, tok::l_brace)) {
+ FormatToken *Previous = CurrentToken->getPreviousNonComment();
+ if ((CurrentToken->is(tok::colon) ||
+ Style.Language == FormatStyle::LK_Proto) &&
+ Previous->is(tok::identifier))
+ Previous->Type = TT_SelectorName;
+ if (CurrentToken->is(tok::colon))
+ Left->Type = TT_DictLiteral;
}
if (!consumeToken())
return false;
@@ -329,10 +353,10 @@ private:
}
void updateParameterCount(FormatToken *Left, FormatToken *Current) {
- if (Current->Type == TT_LambdaLSquare ||
- (Current->is(tok::caret) && Current->Type == TT_UnaryOperator) ||
+ if (Current->is(TT_LambdaLSquare) ||
+ (Current->is(tok::caret) && Current->is(TT_UnaryOperator)) ||
(Style.Language == FormatStyle::LK_JavaScript &&
- Current->TokenText == "function")) {
+ Current->is(Keywords.kw_function))) {
++Left->BlockParameterCount;
}
if (Current->is(tok::comma)) {
@@ -390,7 +414,7 @@ private:
} else if (Contexts.back().ColonIsDictLiteral) {
Tok->Type = TT_DictLiteral;
} else if (Contexts.back().ColonIsObjCMethodExpr ||
- Line.First->Type == TT_ObjCMethodSpecifier) {
+ Line.First->is(TT_ObjCMethodSpecifier)) {
Tok->Type = TT_ObjCMethodExpr;
Tok->Previous->Type = TT_SelectorName;
if (Tok->Previous->ColumnWidth >
@@ -406,6 +430,11 @@ private:
} else if (Contexts.size() == 1 &&
!Line.First->isOneOf(tok::kw_enum, tok::kw_case)) {
Tok->Type = TT_InheritanceColon;
+ } else if (Tok->Previous->is(tok::identifier) && Tok->Next &&
+ Tok->Next->isOneOf(tok::r_paren, tok::comma)) {
+ // This handles a special macro in ObjC code where selectors including
+ // the colon are passed as macro arguments.
+ Tok->Type = TT_ObjCMethodExpr;
} else if (Contexts.back().ContextKind == tok::l_paren) {
Tok->Type = TT_InlineASMColon;
}
@@ -428,9 +457,9 @@ private:
if (!parseParens())
return false;
if (Line.MustBeDeclaration && Contexts.size() == 1 &&
- !Contexts.back().IsExpression &&
- Line.First->Type != TT_ObjCProperty &&
- (!Tok->Previous || Tok->Previous->isNot(tok::kw_decltype)))
+ !Contexts.back().IsExpression && Line.First->isNot(TT_ObjCProperty) &&
+ (!Tok->Previous ||
+ !Tok->Previous->isOneOf(tok::kw_decltype, TT_LeadingJavaAnnotation)))
Line.MightBeFunctionDecl = true;
break;
case tok::l_square:
@@ -442,9 +471,12 @@ private:
return false;
break;
case tok::less:
- if (Tok->Previous && !Tok->Previous->Tok.isLiteral() && parseAngle())
+ if ((!Tok->Previous ||
+ (!Tok->Previous->Tok.isLiteral() &&
+ !(Tok->Previous->is(tok::r_paren) && Contexts.size() > 1))) &&
+ parseAngle()) {
Tok->Type = TT_TemplateOpener;
- else {
+ } else {
Tok->Type = TT_BinaryOperator;
CurrentToken = Tok;
next();
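A minimal illustration (hypothetical identifiers, not part of the diff) of the two roles of '<' that the parseAngle() fallback above separates, a template argument list versus a plain comparison:

#include <vector>

// '<' after a template name opens a template argument list (TT_TemplateOpener).
std::vector<int> Values;

// '<' between two expressions is a comparison, i.e. a binary operator.
bool isOrdered(int A, int B) { return A < B; }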
@@ -467,12 +499,12 @@ private:
if (CurrentToken->isOneOf(tok::star, tok::amp))
CurrentToken->Type = TT_PointerOrReference;
consumeToken();
- if (CurrentToken && CurrentToken->Previous->Type == TT_BinaryOperator)
+ if (CurrentToken && CurrentToken->Previous->is(TT_BinaryOperator))
CurrentToken->Previous->Type = TT_OverloadedOperator;
}
if (CurrentToken) {
CurrentToken->Type = TT_OverloadedOperatorLParen;
- if (CurrentToken->Previous->Type == TT_BinaryOperator)
+ if (CurrentToken->Previous->is(TT_BinaryOperator))
CurrentToken->Previous->Type = TT_OverloadedOperator;
}
break;
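A minimal illustration (hypothetical type, not part of the diff) of the construct whose tokens the operator handling above re-marks as TT_OverloadedOperator, with the following '(' becoming TT_OverloadedOperatorLParen:

// 'operator' plus '+' form the overloaded operator name; the '(' after it is
// the parameter-list paren, not a call or cast paren.
struct Money {
  int Cents;
  Money operator+(Money Other) const { return Money{Cents + Other.Cents}; }
};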
@@ -483,8 +515,8 @@ private:
parseTemplateDeclaration();
break;
case tok::identifier:
- if (Line.First->is(tok::kw_for) &&
- Tok->Tok.getIdentifierInfo() == &Ident_in)
+ if (Line.First->is(tok::kw_for) && Tok->is(Keywords.kw_in) &&
+ Tok->Previous->isNot(tok::colon))
Tok->Type = TT_ObjCForIn;
break;
case tok::comma:
@@ -502,7 +534,6 @@ private:
}
void parseIncludeDirective() {
- next();
if (CurrentToken && CurrentToken->is(tok::less)) {
next();
while (CurrentToken) {
@@ -510,14 +541,6 @@ private:
CurrentToken->Type = TT_ImplicitStringLiteral;
next();
}
- } else {
- while (CurrentToken) {
- if (CurrentToken->is(tok::string_literal))
- // Mark these string literals as "implicit" literals, too, so that
- // they are not split or line-wrapped.
- CurrentToken->Type = TT_ImplicitStringLiteral;
- next();
- }
}
}
@@ -544,22 +567,25 @@ private:
}
}
- void parsePreprocessorDirective() {
+ LineType parsePreprocessorDirective() {
+ LineType Type = LT_PreprocessorDirective;
next();
if (!CurrentToken)
- return;
+ return Type;
if (CurrentToken->Tok.is(tok::numeric_constant)) {
CurrentToken->SpacesRequiredBefore = 1;
- return;
+ return Type;
}
// Hashes in the middle of a line can lead to any strange token
// sequence.
if (!CurrentToken->Tok.getIdentifierInfo())
- return;
+ return Type;
switch (CurrentToken->Tok.getIdentifierInfo()->getPPKeywordID()) {
case tok::pp_include:
case tok::pp_import:
+ next();
parseIncludeDirective();
+ Type = LT_ImportStatement;
break;
case tok::pp_error:
case tok::pp_warning:
@@ -578,33 +604,53 @@ private:
}
while (CurrentToken)
next();
+ return Type;
}
public:
LineType parseLine() {
if (CurrentToken->is(tok::hash)) {
- parsePreprocessorDirective();
- return LT_PreprocessorDirective;
+ return parsePreprocessorDirective();
}
// Directly allow 'import <string-literal>' to support protocol buffer
// definitions (code.google.com/p/protobuf) or missing "#" (either way we
// should not break the line).
IdentifierInfo *Info = CurrentToken->Tok.getIdentifierInfo();
- if (Info && Info->getPPKeywordID() == tok::pp_import &&
- CurrentToken->Next && CurrentToken->Next->is(tok::string_literal))
+ if ((Style.Language == FormatStyle::LK_Java &&
+ CurrentToken->is(Keywords.kw_package)) ||
+ (Info && Info->getPPKeywordID() == tok::pp_import &&
+ CurrentToken->Next &&
+ CurrentToken->Next->isOneOf(tok::string_literal, tok::identifier,
+ tok::kw_static))) {
+ next();
parseIncludeDirective();
+ return LT_ImportStatement;
+ }
+ // If this line starts and ends in '<' and '>', respectively, it is likely
+ // part of "#define <a/b.h>".
+ if (CurrentToken->is(tok::less) && Line.Last->is(tok::greater)) {
+ parseIncludeDirective();
+ return LT_ImportStatement;
+ }
+
+ bool KeywordVirtualFound = false;
+ bool ImportStatement = false;
while (CurrentToken) {
if (CurrentToken->is(tok::kw_virtual))
KeywordVirtualFound = true;
+ if (IsImportStatement(*CurrentToken))
+ ImportStatement = true;
if (!consumeToken())
return LT_Invalid;
}
if (KeywordVirtualFound)
return LT_VirtualFunctionDecl;
+ if (ImportStatement)
+ return LT_ImportStatement;
- if (Line.First->Type == TT_ObjCMethodSpecifier) {
+ if (Line.First->is(TT_ObjCMethodSpecifier)) {
if (Contexts.back().FirstObjCSelectorName)
Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
Contexts.back().LongestObjCSelectorName;
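A minimal illustration (hypothetical header names, not part of the diff) of C++ lines that the reworked parseLine()/parsePreprocessorDirective() report as LT_ImportStatement:

// For the angle-bracket form, the tokens between '<' and '>' are marked
// TT_ImplicitStringLiteral so the path is never split across lines.
#include <some/project/Config.h>
#import "LegacyHeader.h"

The surrounding changes route Java package/import lines and Closure-style goog.module()/goog.require()/goog.provide() calls to the same line type.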
@@ -615,17 +661,26 @@ public:
}
private:
+ bool IsImportStatement(const FormatToken &Tok) {
+ // FIXME: Closure-library specific stuff should not be hard-coded but be
+ // configurable.
+ return Style.Language == FormatStyle::LK_JavaScript &&
+ Tok.TokenText == "goog" && Tok.Next && Tok.Next->is(tok::period) &&
+ Tok.Next->Next && (Tok.Next->Next->TokenText == "module" ||
+ Tok.Next->Next->TokenText == "require" ||
+ Tok.Next->Next->TokenText == "provide") &&
+ Tok.Next->Next->Next && Tok.Next->Next->Next->is(tok::l_paren);
+ }
+
void resetTokenMetadata(FormatToken *Token) {
if (!Token)
return;
// Reset token type in case we have already looked at it and then
// recovered from an error (e.g. failure to find the matching >).
- if (CurrentToken->Type != TT_LambdaLSquare &&
- CurrentToken->Type != TT_FunctionLBrace &&
- CurrentToken->Type != TT_ImplicitStringLiteral &&
- CurrentToken->Type != TT_RegexLiteral &&
- CurrentToken->Type != TT_TrailingReturnArrow)
+ if (!CurrentToken->isOneOf(TT_LambdaLSquare, TT_FunctionLBrace,
+ TT_ImplicitStringLiteral, TT_RegexLiteral,
+ TT_TrailingReturnArrow))
CurrentToken->Type = TT_Unknown;
CurrentToken->Role.reset();
CurrentToken->FakeLParens.clear();
@@ -634,9 +689,10 @@ private:
void next() {
if (CurrentToken) {
- determineTokenType(*CurrentToken);
- CurrentToken->BindingStrength = Contexts.back().BindingStrength;
CurrentToken->NestingLevel = Contexts.size() - 1;
+ CurrentToken->BindingStrength = Contexts.back().BindingStrength;
+ modifyContext(*CurrentToken);
+ determineTokenType(*CurrentToken);
CurrentToken = CurrentToken->Next;
}
@@ -688,23 +744,29 @@ private:
~ScopedContextCreator() { P.Contexts.pop_back(); }
};
- void determineTokenType(FormatToken &Current) {
+ void modifyContext(const FormatToken &Current) {
if (Current.getPrecedence() == prec::Assignment &&
- !Line.First->isOneOf(tok::kw_template, tok::kw_using) &&
+ !Line.First->isOneOf(tok::kw_template, tok::kw_using,
+ TT_UnaryOperator) &&
(!Current.Previous || Current.Previous->isNot(tok::kw_operator))) {
Contexts.back().IsExpression = true;
for (FormatToken *Previous = Current.Previous;
Previous && !Previous->isOneOf(tok::comma, tok::semi);
Previous = Previous->Previous) {
- if (Previous->isOneOf(tok::r_square, tok::r_paren))
+ if (Previous->isOneOf(tok::r_square, tok::r_paren)) {
Previous = Previous->MatchingParen;
- if (Previous->Type == TT_BinaryOperator &&
- Previous->isOneOf(tok::star, tok::amp)) {
- Previous->Type = TT_PointerOrReference;
+ if (!Previous)
+ break;
}
+ if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator) &&
+ Previous->isOneOf(tok::star, tok::amp) && Previous->Previous &&
+ Previous->Previous->isNot(tok::equal))
+ Previous->Type = TT_PointerOrReference;
}
} else if (Current.isOneOf(tok::kw_return, tok::kw_throw)) {
Contexts.back().IsExpression = true;
+ } else if (Current.is(TT_TrailingReturnArrow)) {
+ Contexts.back().IsExpression = false;
} else if (Current.is(tok::l_paren) && !Line.MustBeDeclaration &&
!Line.InPPDirective &&
(!Current.Previous ||
@@ -712,7 +774,7 @@ private:
bool ParametersOfFunctionType =
Current.Previous && Current.Previous->is(tok::r_paren) &&
Current.Previous->MatchingParen &&
- Current.Previous->MatchingParen->Type == TT_FunctionTypeLParen;
+ Current.Previous->MatchingParen->is(TT_FunctionTypeLParen);
bool IsForOrCatch = Current.Previous &&
Current.Previous->isOneOf(tok::kw_for, tok::kw_catch);
Contexts.back().IsExpression = !ParametersOfFunctionType && !IsForOrCatch;
@@ -721,8 +783,10 @@ private:
Previous && Previous->isOneOf(tok::star, tok::amp);
Previous = Previous->Previous)
Previous->Type = TT_PointerOrReference;
+ if (Line.MustBeDeclaration)
+ Contexts.back().IsExpression = Contexts.front().InCtorInitializer;
} else if (Current.Previous &&
- Current.Previous->Type == TT_CtorInitializerColon) {
+ Current.Previous->is(TT_CtorInitializerColon)) {
Contexts.back().IsExpression = true;
Contexts.back().InCtorInitializer = true;
} else if (Current.is(tok::kw_new)) {
@@ -731,70 +795,99 @@ private:
// This should be the condition or increment in a for-loop.
Contexts.back().IsExpression = true;
}
+ }
+
+ void determineTokenType(FormatToken &Current) {
+ if (!Current.is(TT_Unknown))
+ // The token type is already known.
+ return;
- if (Current.Type == TT_Unknown) {
+ // Line.MightBeFunctionDecl can only be true after the parentheses of a
+ // function declaration have been found. In this case, 'Current' is a
+ // trailing token of this declaration and thus cannot be a name.
+ if (Current.is(Keywords.kw_instanceof)) {
+ Current.Type = TT_BinaryOperator;
+ } else if (isStartOfName(Current) &&
+ (!Line.MightBeFunctionDecl || Current.NestingLevel != 0)) {
+ Contexts.back().FirstStartOfName = &Current;
+ Current.Type = TT_StartOfName;
+ } else if (Current.is(tok::kw_auto)) {
+ AutoFound = true;
+ } else if (Current.is(tok::arrow) &&
+ Style.Language == FormatStyle::LK_Java) {
+ Current.Type = TT_LambdaArrow;
+ } else if (Current.is(tok::arrow) && AutoFound && Line.MustBeDeclaration &&
+ Current.NestingLevel == 0) {
+ Current.Type = TT_TrailingReturnArrow;
+ } else if (Current.isOneOf(tok::star, tok::amp, tok::ampamp)) {
+ Current.Type =
+ determineStarAmpUsage(Current, Contexts.back().CanBeExpression &&
+ Contexts.back().IsExpression,
+ Contexts.back().InTemplateArgument);
+ } else if (Current.isOneOf(tok::minus, tok::plus, tok::caret)) {
+ Current.Type = determinePlusMinusCaretUsage(Current);
+ if (Current.is(TT_UnaryOperator) && Current.is(tok::caret))
+ Contexts.back().CaretFound = true;
+ } else if (Current.isOneOf(tok::minusminus, tok::plusplus)) {
+ Current.Type = determineIncrementUsage(Current);
+ } else if (Current.isOneOf(tok::exclaim, tok::tilde)) {
+ Current.Type = TT_UnaryOperator;
+ } else if (Current.is(tok::question)) {
+ Current.Type = TT_ConditionalExpr;
+ } else if (Current.isBinaryOperator() &&
+ (!Current.Previous || Current.Previous->isNot(tok::l_square))) {
+ Current.Type = TT_BinaryOperator;
+ } else if (Current.is(tok::comment)) {
+ if (Current.TokenText.startswith("//"))
+ Current.Type = TT_LineComment;
+ else
+ Current.Type = TT_BlockComment;
+ } else if (Current.is(tok::r_paren)) {
+ if (rParenEndsCast(Current))
+ Current.Type = TT_CastRParen;
+ } else if (Current.is(tok::at) && Current.Next) {
+ switch (Current.Next->Tok.getObjCKeywordID()) {
+ case tok::objc_interface:
+ case tok::objc_implementation:
+ case tok::objc_protocol:
+ Current.Type = TT_ObjCDecl;
+ break;
+ case tok::objc_property:
+ Current.Type = TT_ObjCProperty;
+ break;
+ default:
+ break;
+ }
+ } else if (Current.is(tok::period)) {
+ FormatToken *PreviousNoComment = Current.getPreviousNonComment();
+ if (PreviousNoComment &&
+ PreviousNoComment->isOneOf(tok::comma, tok::l_brace))
+ Current.Type = TT_DesignatedInitializerPeriod;
+ else if (Style.Language == FormatStyle::LK_Java && Current.Previous &&
+ Current.Previous->isOneOf(TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation)) {
+ Current.Type = Current.Previous->Type;
+ }
+ } else if (Current.isOneOf(tok::identifier, tok::kw_const) &&
+ Current.Previous &&
+ !Current.Previous->isOneOf(tok::equal, tok::at) &&
+ Line.MightBeFunctionDecl && Contexts.size() == 1) {
// Line.MightBeFunctionDecl can only be true after the parentheses of a
- // function declaration have been found. In this case, 'Current' is a
- // trailing token of this declaration and thus cannot be a name.
- if (isStartOfName(Current) && !Line.MightBeFunctionDecl) {
- Contexts.back().FirstStartOfName = &Current;
- Current.Type = TT_StartOfName;
- } else if (Current.is(tok::kw_auto)) {
- AutoFound = true;
- } else if (Current.is(tok::arrow) && AutoFound &&
- Line.MustBeDeclaration) {
- Current.Type = TT_TrailingReturnArrow;
- } else if (Current.isOneOf(tok::star, tok::amp, tok::ampamp)) {
- Current.Type =
- determineStarAmpUsage(Current, Contexts.back().CanBeExpression &&
- Contexts.back().IsExpression,
- Contexts.back().InTemplateArgument);
- } else if (Current.isOneOf(tok::minus, tok::plus, tok::caret)) {
- Current.Type = determinePlusMinusCaretUsage(Current);
- if (Current.Type == TT_UnaryOperator && Current.is(tok::caret))
- Contexts.back().CaretFound = true;
- } else if (Current.isOneOf(tok::minusminus, tok::plusplus)) {
- Current.Type = determineIncrementUsage(Current);
- } else if (Current.is(tok::exclaim)) {
- Current.Type = TT_UnaryOperator;
- } else if (Current.is(tok::question)) {
- Current.Type = TT_ConditionalExpr;
- } else if (Current.isBinaryOperator() &&
- (!Current.Previous ||
- Current.Previous->isNot(tok::l_square))) {
- Current.Type = TT_BinaryOperator;
- } else if (Current.is(tok::comment)) {
- if (Current.TokenText.startswith("//"))
- Current.Type = TT_LineComment;
+ // function declaration have been found.
+ Current.Type = TT_TrailingAnnotation;
+ } else if (Style.Language == FormatStyle::LK_Java && Current.Previous) {
+ if (Current.Previous->is(tok::at) &&
+ Current.isNot(Keywords.kw_interface)) {
+ const FormatToken &AtToken = *Current.Previous;
+ const FormatToken *Previous = AtToken.getPreviousNonComment();
+ if (!Previous || Previous->is(TT_LeadingJavaAnnotation))
+ Current.Type = TT_LeadingJavaAnnotation;
else
- Current.Type = TT_BlockComment;
- } else if (Current.is(tok::r_paren)) {
- if (rParenEndsCast(Current))
- Current.Type = TT_CastRParen;
- } else if (Current.is(tok::at) && Current.Next) {
- switch (Current.Next->Tok.getObjCKeywordID()) {
- case tok::objc_interface:
- case tok::objc_implementation:
- case tok::objc_protocol:
- Current.Type = TT_ObjCDecl;
- break;
- case tok::objc_property:
- Current.Type = TT_ObjCProperty;
- break;
- default:
- break;
- }
- } else if (Current.is(tok::period)) {
- FormatToken *PreviousNoComment = Current.getPreviousNonComment();
- if (PreviousNoComment &&
- PreviousNoComment->isOneOf(tok::comma, tok::l_brace))
- Current.Type = TT_DesignatedInitializerPeriod;
- } else if (Current.isOneOf(tok::identifier, tok::kw_const) &&
- Current.Previous && Current.Previous->isNot(tok::equal) &&
- Line.MightBeFunctionDecl && Contexts.size() == 1) {
- // Line.MightBeFunctionDecl can only be true after the parentheses of a
- // function declaration have been found.
- Current.Type = TT_TrailingAnnotation;
+ Current.Type = TT_JavaAnnotation;
+ } else if (Current.Previous->is(tok::period) &&
+ Current.Previous->isOneOf(TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation)) {
+ Current.Type = Current.Previous->Type;
}
}
}
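A minimal illustration (hypothetical names, not part of the diff) of the two uses of '->' that the rewritten determineTokenType() separates through the AutoFound, Line.MustBeDeclaration and NestingLevel checks above:

#include <cstddef>

struct Buffer {
  std::size_t Length = 0;
  // '->' after 'auto' in a declaration at nesting level 0 is the arrow of a
  // trailing return type (TT_TrailingReturnArrow).
  auto size() const -> std::size_t { return Length; }
};

// '->' after an object expression is plain member access and is not treated
// as a trailing return arrow.
std::size_t sizeOf(const Buffer *B) { return B->size(); }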
@@ -808,6 +901,9 @@ private:
if (Tok.isNot(tok::identifier) || !Tok.Previous)
return false;
+ if (Tok.Previous->is(TT_LeadingJavaAnnotation))
+ return false;
+
// Skip "const" as it does not have an influence on whether this is a name.
FormatToken *PreviousNotConst = Tok.Previous;
while (PreviousNotConst && PreviousNotConst->is(tok::kw_const))
@@ -820,9 +916,10 @@ private:
PreviousNotConst->Previous &&
PreviousNotConst->Previous->is(tok::hash);
- if (PreviousNotConst->Type == TT_TemplateCloser)
+ if (PreviousNotConst->is(TT_TemplateCloser))
return PreviousNotConst && PreviousNotConst->MatchingParen &&
PreviousNotConst->MatchingParen->Previous &&
+ PreviousNotConst->MatchingParen->Previous->isNot(tok::period) &&
PreviousNotConst->MatchingParen->Previous->isNot(tok::kw_template);
if (PreviousNotConst->is(tok::r_paren) && PreviousNotConst->MatchingParen &&
@@ -831,7 +928,7 @@ private:
return true;
return (!IsPPKeyword && PreviousNotConst->is(tok::identifier)) ||
- PreviousNotConst->Type == TT_PointerOrReference ||
+ PreviousNotConst->is(TT_PointerOrReference) ||
PreviousNotConst->isSimpleTypeSpecifier();
}
@@ -840,14 +937,28 @@ private:
FormatToken *LeftOfParens = nullptr;
if (Tok.MatchingParen)
LeftOfParens = Tok.MatchingParen->getPreviousNonComment();
- if (LeftOfParens && LeftOfParens->is(tok::r_paren))
+ if (LeftOfParens && LeftOfParens->is(tok::r_paren) &&
+ LeftOfParens->MatchingParen)
+ LeftOfParens = LeftOfParens->MatchingParen->Previous;
+ if (LeftOfParens && LeftOfParens->is(tok::r_square) &&
+ LeftOfParens->MatchingParen &&
+ LeftOfParens->MatchingParen->is(TT_LambdaLSquare))
return false;
+ if (Tok.Next) {
+ if (Tok.Next->is(tok::question))
+ return false;
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ Tok.Next->is(Keywords.kw_in))
+ return false;
+ if (Style.Language == FormatStyle::LK_Java && Tok.Next->is(tok::l_paren))
+ return true;
+ }
bool IsCast = false;
bool ParensAreEmpty = Tok.Previous == Tok.MatchingParen;
- bool ParensAreType = !Tok.Previous ||
- Tok.Previous->Type == TT_PointerOrReference ||
- Tok.Previous->Type == TT_TemplateCloser ||
- Tok.Previous->isSimpleTypeSpecifier();
+ bool ParensAreType =
+ !Tok.Previous ||
+ Tok.Previous->isOneOf(TT_PointerOrReference, TT_TemplateCloser) ||
+ Tok.Previous->isSimpleTypeSpecifier();
bool ParensCouldEndDecl =
Tok.Next && Tok.Next->isOneOf(tok::equal, tok::semi, tok::l_brace);
bool IsSizeOfOrAlignOf =
@@ -862,12 +973,11 @@ private:
IsCast = true;
// If there is an identifier after the (), it is likely a cast, unless
// there is also an identifier before the ().
- else if (LeftOfParens &&
+ else if (LeftOfParens && Tok.Next &&
(LeftOfParens->Tok.getIdentifierInfo() == nullptr ||
LeftOfParens->is(tok::kw_return)) &&
- LeftOfParens->Type != TT_OverloadedOperator &&
- LeftOfParens->isNot(tok::at) &&
- LeftOfParens->Type != TT_TemplateCloser && Tok.Next) {
+ !LeftOfParens->isOneOf(TT_OverloadedOperator, tok::at,
+ TT_TemplateCloser)) {
if (Tok.Next->isOneOf(tok::identifier, tok::numeric_constant)) {
IsCast = true;
} else {
@@ -879,8 +989,9 @@ private:
if (Prev && Tok.Next && Tok.Next->Next) {
bool NextIsUnary = Tok.Next->isUnaryOperator() ||
Tok.Next->isOneOf(tok::amp, tok::star);
- IsCast = NextIsUnary && Tok.Next->Next->isOneOf(
- tok::identifier, tok::numeric_constant);
+ IsCast =
+ NextIsUnary && !Tok.Next->is(tok::plus) &&
+ Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant);
}
for (; Prev != Tok.MatchingParen; Prev = Prev->Previous) {
@@ -897,29 +1008,31 @@ private:
/// \brief Return the type of the given token assuming it is * or &.
TokenType determineStarAmpUsage(const FormatToken &Tok, bool IsExpression,
bool InTemplateArgument) {
+ if (Style.Language == FormatStyle::LK_JavaScript)
+ return TT_BinaryOperator;
+
const FormatToken *PrevToken = Tok.getPreviousNonComment();
if (!PrevToken)
return TT_UnaryOperator;
const FormatToken *NextToken = Tok.getNextNonComment();
- if (!NextToken || NextToken->is(tok::l_brace))
+ if (!NextToken ||
+ (NextToken->is(tok::l_brace) && !NextToken->getNextNonComment()))
return TT_Unknown;
- if (PrevToken->is(tok::coloncolon) ||
- (PrevToken->is(tok::l_paren) && !IsExpression))
+ if (PrevToken->is(tok::coloncolon))
return TT_PointerOrReference;
if (PrevToken->isOneOf(tok::l_paren, tok::l_square, tok::l_brace,
tok::comma, tok::semi, tok::kw_return, tok::colon,
tok::equal, tok::kw_delete, tok::kw_sizeof) ||
- PrevToken->Type == TT_BinaryOperator ||
- PrevToken->Type == TT_ConditionalExpr ||
- PrevToken->Type == TT_UnaryOperator || PrevToken->Type == TT_CastRParen)
+ PrevToken->isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
+ TT_UnaryOperator, TT_CastRParen))
return TT_UnaryOperator;
- if (NextToken->is(tok::l_square) && NextToken->Type != TT_LambdaLSquare)
+ if (NextToken->is(tok::l_square) && NextToken->isNot(TT_LambdaLSquare))
return TT_PointerOrReference;
- if (NextToken->is(tok::kw_operator))
+ if (NextToken->isOneOf(tok::kw_operator, tok::comma))
return TT_PointerOrReference;
if (PrevToken->is(tok::r_paren) && PrevToken->MatchingParen &&
@@ -930,7 +1043,7 @@ private:
if (PrevToken->Tok.isLiteral() ||
PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::kw_true,
- tok::kw_false) ||
+ tok::kw_false, tok::r_brace) ||
NextToken->Tok.isLiteral() ||
NextToken->isOneOf(tok::kw_true, tok::kw_false) ||
NextToken->isUnaryOperator() ||
@@ -940,6 +1053,10 @@ private:
(InTemplateArgument && NextToken->Tok.isAnyIdentifier()))
return TT_BinaryOperator;
+ // "&&(" is quite unlikely to be two successive unary "&".
+ if (Tok.is(tok::ampamp) && NextToken && NextToken->is(tok::l_paren))
+ return TT_BinaryOperator;
+
// This catches some cases where evaluation order is used as control flow:
// aaa && aaa->f();
const FormatToken *NextNextToken = NextToken->getNextNonComment();
@@ -948,7 +1065,7 @@ private:
// It is very unlikely that we are going to find a pointer or reference type
// definition on the RHS of an assignment.
- if (IsExpression)
+ if (IsExpression && !Contexts.back().CaretFound)
return TT_BinaryOperator;
return TT_PointerOrReference;
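A minimal illustration (hypothetical names, not part of the diff) of the ambiguity determineStarAmpUsage() resolves, a '*' or '&' acting as part of a declarator versus as an operator in an expression:

int scale(int Factor) {
  int Base = 2;
  int *Alias = &Base;          // '*' in a declarator, '&' as address-of
  int Product = Base * Factor; // '*' between two operands: multiplication
  return Product + *Alias;     // '*' before an operand: dereference
}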
@@ -956,7 +1073,7 @@ private:
TokenType determinePlusMinusCaretUsage(const FormatToken &Tok) {
const FormatToken *PrevToken = Tok.getPreviousNonComment();
- if (!PrevToken || PrevToken->Type == TT_CastRParen)
+ if (!PrevToken || PrevToken->is(TT_CastRParen))
return TT_UnaryOperator;
// Use heuristics to recognize unary operators.
@@ -966,7 +1083,7 @@ private:
return TT_UnaryOperator;
// There can't be two consecutive binary operators.
- if (PrevToken->Type == TT_BinaryOperator)
+ if (PrevToken->is(TT_BinaryOperator))
return TT_UnaryOperator;
// Fall back to marking the token as binary operator.
@@ -976,7 +1093,7 @@ private:
/// \brief Determine whether ++/-- are pre- or post-increments/-decrements.
TokenType determineIncrementUsage(const FormatToken &Tok) {
const FormatToken *PrevToken = Tok.getPreviousNonComment();
- if (!PrevToken || PrevToken->Type == TT_CastRParen)
+ if (!PrevToken || PrevToken->is(TT_CastRParen))
return TT_UnaryOperator;
if (PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::identifier))
return TT_TrailingUnaryOperator;
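A minimal illustration (hypothetical names, not part of the diff) of the prefix/postfix distinction determineIncrementUsage() draws from the token in front of '++':

int bumpTwice(int Counter) {
  int Pre = ++Counter;  // prefix: '++' directly before its operand
  int Post = Counter++; // postfix: '++' after an identifier, the case matched
                        // by the r_paren/r_square/identifier check above
  return Pre + Post;
}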
@@ -989,9 +1106,8 @@ private:
const FormatStyle &Style;
AnnotatedLine &Line;
FormatToken *CurrentToken;
- bool KeywordVirtualFound;
bool AutoFound;
- IdentifierInfo &Ident_in;
+ const AdditionalKeywords &Keywords;
};
static int PrecedenceUnaryOperator = prec::PointerToMember + 1;
@@ -1001,20 +1117,17 @@ static int PrecedenceArrowAndPeriod = prec::PointerToMember + 2;
/// operator precedence.
class ExpressionParser {
public:
- ExpressionParser(AnnotatedLine &Line) : Current(Line.First) {
- // Skip leading "}", e.g. in "} else if (...) {".
- if (Current->is(tok::r_brace))
- next();
- }
+ ExpressionParser(const FormatStyle &Style, const AdditionalKeywords &Keywords,
+ AnnotatedLine &Line)
+ : Style(Style), Keywords(Keywords), Current(Line.First) {}
/// \brief Parse expressions with the given operator precedence.
void parse(int Precedence = 0) {
// Skip 'return' and ObjC selector colons as they are not part of a binary
// expression.
- while (Current &&
- (Current->is(tok::kw_return) ||
- (Current->is(tok::colon) && (Current->Type == TT_ObjCMethodExpr ||
- Current->Type == TT_DictLiteral))))
+ while (Current && (Current->is(tok::kw_return) ||
+ (Current->is(tok::colon) &&
+ Current->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral))))
next();
if (!Current || Precedence > PrecedenceArrowAndPeriod)
@@ -1043,7 +1156,7 @@ public:
int CurrentPrecedence = getCurrentPrecedence();
- if (Current && Current->Type == TT_SelectorName &&
+ if (Current && Current->is(TT_SelectorName) &&
Precedence == CurrentPrecedence) {
if (LatestOperator)
addFakeParenthesis(Start, prec::Level(Precedence));
@@ -1052,18 +1165,11 @@ public:
// At the end of the line or when an operator with higher precedence is
// found, insert fake parenthesis and return.
- if (!Current || Current->closesScope() ||
- (CurrentPrecedence != -1 && CurrentPrecedence < Precedence)) {
- if (LatestOperator) {
- LatestOperator->LastOperator = true;
- if (Precedence == PrecedenceArrowAndPeriod) {
- // Call expressions don't have a binary operator precedence.
- addFakeParenthesis(Start, prec::Unknown);
- } else {
- addFakeParenthesis(Start, prec::Level(Precedence));
- }
- }
- return;
+ if (!Current || (Current->closesScope() && Current->MatchingParen) ||
+ (CurrentPrecedence != -1 && CurrentPrecedence < Precedence) ||
+ (CurrentPrecedence == prec::Conditional &&
+ Precedence == prec::Assignment && Current->is(tok::colon))) {
+ break;
}
// Consume scopes: (), [], <> and {}
@@ -1080,8 +1186,17 @@ public:
Current->OperatorIndex = OperatorIndex;
++OperatorIndex;
}
+ next(/*SkipPastLeadingComments=*/Precedence > 0);
+ }
+ }
- next();
+ if (LatestOperator && (Current || Precedence > 0)) {
+ LatestOperator->LastOperator = true;
+ if (Precedence == PrecedenceArrowAndPeriod) {
+ // Call expressions don't have a binary operator precedence.
+ addFakeParenthesis(Start, prec::Unknown);
+ } else {
+ addFakeParenthesis(Start, prec::Level(Precedence));
}
}
}
@@ -1091,17 +1206,29 @@ private:
/// and other tokens that we treat like binary operators.
int getCurrentPrecedence() {
if (Current) {
- if (Current->Type == TT_ConditionalExpr)
+ const FormatToken *NextNonComment = Current->getNextNonComment();
+ if (Current->is(TT_ConditionalExpr))
return prec::Conditional;
- else if (Current->is(tok::semi) || Current->Type == TT_InlineASMColon ||
- Current->Type == TT_SelectorName)
+ else if (NextNonComment && NextNonComment->is(tok::colon) &&
+ NextNonComment->is(TT_DictLiteral))
+ return prec::Comma;
+ else if (Current->is(TT_LambdaArrow))
+ return prec::Comma;
+ else if (Current->isOneOf(tok::semi, TT_InlineASMColon,
+ TT_SelectorName) ||
+ (Current->is(tok::comment) && NextNonComment &&
+ NextNonComment->is(TT_SelectorName)))
return 0;
- else if (Current->Type == TT_RangeBasedForLoopColon)
+ else if (Current->is(TT_RangeBasedForLoopColon))
return prec::Comma;
- else if (Current->Type == TT_BinaryOperator || Current->is(tok::comma))
+ else if (Current->is(TT_BinaryOperator) || Current->is(tok::comma))
return Current->getPrecedence();
else if (Current->isOneOf(tok::period, tok::arrow))
return PrecedenceArrowAndPeriod;
+ else if (Style.Language == FormatStyle::LK_Java &&
+ Current->isOneOf(Keywords.kw_extends, Keywords.kw_implements,
+ Keywords.kw_throws))
+ return 0;
}
return -1;
}
@@ -1111,16 +1238,19 @@ private:
if (Precedence > prec::Unknown)
Start->StartsBinaryExpression = true;
if (Current) {
- ++Current->Previous->FakeRParens;
+ FormatToken *Previous = Current->Previous;
+ while (Previous->is(tok::comment) && Previous->Previous)
+ Previous = Previous->Previous;
+ ++Previous->FakeRParens;
if (Precedence > prec::Unknown)
- Current->Previous->EndsBinaryExpression = true;
+ Previous->EndsBinaryExpression = true;
}
}
/// \brief Parse unary operator expressions and surround them with fake
/// parentheses if appropriate.
void parseUnaryOperator() {
- if (!Current || Current->Type != TT_UnaryOperator) {
+ if (!Current || Current->isNot(TT_UnaryOperator)) {
parse(PrecedenceArrowAndPeriod);
return;
}
@@ -1134,33 +1264,40 @@ private:
}
void parseConditionalExpr() {
+ while (Current && Current->isTrailingComment()) {
+ next();
+ }
FormatToken *Start = Current;
parse(prec::LogicalOr);
if (!Current || !Current->is(tok::question))
return;
next();
- parseConditionalExpr();
- if (!Current || Current->Type != TT_ConditionalExpr)
+ parse(prec::Assignment);
+ if (!Current || Current->isNot(TT_ConditionalExpr))
return;
next();
- parseConditionalExpr();
+ parse(prec::Assignment);
addFakeParenthesis(Start, prec::Conditional);
}
- void next() {
+ void next(bool SkipPastLeadingComments = true) {
if (Current)
Current = Current->Next;
- while (Current && Current->isTrailingComment())
+ while (Current &&
+ (Current->NewlinesBefore == 0 || SkipPastLeadingComments) &&
+ Current->isTrailingComment())
Current = Current->Next;
}
+ const FormatStyle &Style;
+ const AdditionalKeywords &Keywords;
FormatToken *Current;
};
} // end anonymous namespace
-void
-TokenAnnotator::setCommentLineLevels(SmallVectorImpl<AnnotatedLine *> &Lines) {
+void TokenAnnotator::setCommentLineLevels(
+ SmallVectorImpl<AnnotatedLine *> &Lines) {
const AnnotatedLine *NextNonCommentLine = nullptr;
for (SmallVectorImpl<AnnotatedLine *>::reverse_iterator I = Lines.rbegin(),
E = Lines.rend();
@@ -1181,19 +1318,19 @@ void TokenAnnotator::annotate(AnnotatedLine &Line) {
I != E; ++I) {
annotate(**I);
}
- AnnotatingParser Parser(Style, Line, Ident_in);
+ AnnotatingParser Parser(Style, Line, Keywords);
Line.Type = Parser.parseLine();
if (Line.Type == LT_Invalid)
return;
- ExpressionParser ExprParser(Line);
+ ExpressionParser ExprParser(Style, Keywords, Line);
ExprParser.parse();
- if (Line.First->Type == TT_ObjCMethodSpecifier)
+ if (Line.First->is(TT_ObjCMethodSpecifier))
Line.Type = LT_ObjCMethodDecl;
- else if (Line.First->Type == TT_ObjCDecl)
+ else if (Line.First->is(TT_ObjCDecl))
Line.Type = LT_ObjCDecl;
- else if (Line.First->Type == TT_ObjCProperty)
+ else if (Line.First->is(TT_ObjCProperty))
Line.Type = LT_ObjCProperty;
Line.First->SpacesRequiredBefore = 1;
@@ -1203,13 +1340,11 @@ void TokenAnnotator::annotate(AnnotatedLine &Line) {
// This function heuristically determines whether 'Current' starts the name of a
// function declaration.
static bool isFunctionDeclarationName(const FormatToken &Current) {
- if (Current.Type != TT_StartOfName ||
- Current.NestingLevel != 0 ||
- Current.Previous->Type == TT_StartOfName)
+ if (!Current.is(TT_StartOfName) || Current.NestingLevel != 0)
return false;
const FormatToken *Next = Current.Next;
for (; Next; Next = Next->Next) {
- if (Next->Type == TT_TemplateOpener) {
+ if (Next->is(TT_TemplateOpener)) {
Next = Next->MatchingParen;
} else if (Next->is(tok::coloncolon)) {
Next = Next->Next;
@@ -1229,7 +1364,7 @@ static bool isFunctionDeclarationName(const FormatToken &Current) {
for (const FormatToken *Tok = Next->Next; Tok != Next->MatchingParen;
Tok = Tok->Next) {
if (Tok->is(tok::kw_const) || Tok->isSimpleTypeSpecifier() ||
- Tok->Type == TT_PointerOrReference || Tok->Type == TT_StartOfName)
+ Tok->isOneOf(TT_PointerOrReference, TT_StartOfName))
return true;
if (Tok->isOneOf(tok::l_brace, tok::string_literal) || Tok->Tok.isLiteral())
return false;
@@ -1253,7 +1388,7 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
while (Current) {
if (isFunctionDeclarationName(*Current))
Current->Type = TT_FunctionDeclarationName;
- if (Current->Type == TT_LineComment) {
+ if (Current->is(TT_LineComment)) {
if (Current->Previous->BlockKind == BK_BracedInit &&
Current->Previous->opensScope())
Current->SpacesRequiredBefore = Style.Cpp11BracedListStyle ? 0 : 1;
@@ -1273,7 +1408,7 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
if (Parameter->isOneOf(tok::comment, tok::r_brace))
break;
if (Parameter->Previous && Parameter->Previous->is(tok::comma)) {
- if (Parameter->Previous->Type != TT_CtorInitializerComma &&
+ if (!Parameter->Previous->is(TT_CtorInitializerComma) &&
Parameter->HasUnescapedNewline)
Parameter->MustBreakBefore = true;
break;
@@ -1288,6 +1423,13 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
Current->MustBreakBefore =
Current->MustBreakBefore || mustBreakBefore(Line, *Current);
+ if (Style.AlwaysBreakAfterDefinitionReturnType && InFunctionDecl &&
+ Current->is(TT_FunctionDeclarationName) &&
+ !Line.Last->isOneOf(tok::semi, tok::comment)) // Only for definitions.
+ // FIXME: Line.Last points to tokens other than tok::semi
+ // and tok::lbrace.
+ Current->MustBreakBefore = true;
+
Current->CanBreakBefore =
Current->MustBreakBefore || canBreakBefore(Line, *Current);
unsigned ChildSize = 0;
@@ -1296,15 +1438,17 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
ChildSize = LastOfChild.isTrailingComment() ? Style.ColumnLimit
: LastOfChild.TotalLength + 1;
}
- if (Current->MustBreakBefore || Current->Previous->Children.size() > 1 ||
+ const FormatToken *Prev = Current->Previous;
+ if (Current->MustBreakBefore || Prev->Children.size() > 1 ||
+ (Prev->Children.size() == 1 &&
+ Prev->Children[0]->First->MustBreakBefore) ||
Current->IsMultiline)
- Current->TotalLength = Current->Previous->TotalLength + Style.ColumnLimit;
+ Current->TotalLength = Prev->TotalLength + Style.ColumnLimit;
else
- Current->TotalLength = Current->Previous->TotalLength +
- Current->ColumnWidth + ChildSize +
- Current->SpacesRequiredBefore;
+ Current->TotalLength = Prev->TotalLength + Current->ColumnWidth +
+ ChildSize + Current->SpacesRequiredBefore;
- if (Current->Type == TT_CtorInitializerColon)
+ if (Current->is(TT_CtorInitializerColon))
InFunctionDecl = false;
// FIXME: Only calculate this if CanBreakBefore is true once static
@@ -1349,20 +1493,34 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
if (Left.is(tok::semi))
return 0;
+
+ if (Style.Language == FormatStyle::LK_Java) {
+ if (Right.isOneOf(Keywords.kw_extends, Keywords.kw_throws))
+ return 1;
+ if (Right.is(Keywords.kw_implements))
+ return 2;
+ if (Left.is(tok::comma) && Left.NestingLevel == 0)
+ return 3;
+ } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Right.is(Keywords.kw_function))
+ return 100;
+ }
+
if (Left.is(tok::comma) || (Right.is(tok::identifier) && Right.Next &&
- Right.Next->Type == TT_DictLiteral))
+ Right.Next->is(TT_DictLiteral)))
return 1;
if (Right.is(tok::l_square)) {
if (Style.Language == FormatStyle::LK_Proto)
return 1;
- if (Right.Type != TT_ObjCMethodExpr && Right.Type != TT_LambdaLSquare)
+ if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare))
return 500;
}
- if (Right.Type == TT_StartOfName ||
- Right.Type == TT_FunctionDeclarationName || Right.is(tok::kw_operator)) {
+
+ if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
+ Right.is(tok::kw_operator)) {
if (Line.First->is(tok::kw_for) && Right.PartOfMultiVariableDeclStmt)
return 3;
- if (Left.Type == TT_StartOfName)
+ if (Left.is(TT_StartOfName))
return 20;
if (InFunctionDecl && Right.NestingLevel == 0)
return Style.PenaltyReturnTypeOnItsOwnLine;
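A minimal illustration (hypothetical declaration, not part of the diff) of the break that PenaltyReturnTypeOnItsOwnLine prices in splitPenalty(), placing a function name below a long return type once the one-line form exceeds the column limit:

#include <map>
#include <string>
#include <vector>

// One possible layout when the declaration does not fit on a single line.
std::map<std::string, std::vector<int>>
collectLineNumbersByFile(const std::vector<std::string> &Paths);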
@@ -1370,7 +1528,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
}
if (Left.is(tok::equal) && Right.is(tok::l_brace))
return 150;
- if (Left.Type == TT_CastRParen)
+ if (Left.is(TT_CastRParen))
return 100;
if (Left.is(tok::coloncolon) ||
(Right.is(tok::period) && Style.Language == FormatStyle::LK_Proto))
@@ -1378,8 +1536,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
if (Left.isOneOf(tok::kw_class, tok::kw_struct))
return 5000;
- if (Left.Type == TT_RangeBasedForLoopColon ||
- Left.Type == TT_InheritanceColon)
+ if (Left.isOneOf(TT_RangeBasedForLoopColon, TT_InheritanceColon))
return 2;
if (Right.isMemberAccess()) {
@@ -1389,8 +1546,13 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 150;
}
- if (Right.Type == TT_TrailingAnnotation &&
+ if (Right.is(TT_TrailingAnnotation) &&
(!Right.Next || Right.Next->isNot(tok::l_paren))) {
+ // Moving trailing annotations to the next line is fine for ObjC method
+ // declarations.
+ if (Line.First->is(TT_ObjCMethodSpecifier))
+
+ return 10;
// Generally, breaking before a trailing annotation is bad unless it is
// function-like. It seems to be especially preferable to keep standard
// annotations (i.e. "const", "final" and "override") on the same line.
@@ -1406,18 +1568,27 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
// In Objective-C method expressions, prefer breaking before "param:" over
// breaking after it.
- if (Right.Type == TT_SelectorName)
+ if (Right.is(TT_SelectorName))
return 0;
- if (Left.is(tok::colon) && Left.Type == TT_ObjCMethodExpr)
+ if (Left.is(tok::colon) && Left.is(TT_ObjCMethodExpr))
return Line.MightBeFunctionDecl ? 50 : 500;
- if (Left.is(tok::l_paren) && InFunctionDecl)
+ if (Left.is(tok::l_paren) && InFunctionDecl && Style.AlignAfterOpenBracket)
return 100;
if (Left.is(tok::equal) && InFunctionDecl)
return 110;
- if (Left.opensScope())
+ if (Right.is(tok::r_brace))
+ return 1;
+ if (Left.is(TT_TemplateOpener))
+ return 100;
+ if (Left.opensScope()) {
+ if (!Style.AlignAfterOpenBracket)
+ return 0;
return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
: 19;
+ }
+ if (Left.is(TT_JavaAnnotation))
+ return 50;
if (Right.is(tok::lessless)) {
if (Left.is(tok::string_literal)) {
@@ -1433,7 +1604,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
}
return 1; // Breaking at a << is really cheap.
}
- if (Left.Type == TT_ConditionalExpr)
+ if (Left.is(TT_ConditionalExpr))
return prec::Conditional;
prec::Level Level = Left.getPrecedence();
@@ -1446,18 +1617,6 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
const FormatToken &Left,
const FormatToken &Right) {
- if (Style.Language == FormatStyle::LK_Proto) {
- if (Right.is(tok::period) &&
- (Left.TokenText == "optional" || Left.TokenText == "required" ||
- Left.TokenText == "repeated"))
- return true;
- if (Right.is(tok::l_paren) &&
- (Left.TokenText == "returns" || Left.TokenText == "option"))
- return true;
- } else if (Style.Language == FormatStyle::LK_JavaScript) {
- if (Left.TokenText == "var")
- return true;
- }
if (Left.is(tok::kw_return) && Right.isNot(tok::semi))
return true;
if (Style.ObjCSpaceAfterProperty && Line.Type == LT_ObjCProperty &&
@@ -1470,21 +1629,16 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.is(tok::l_paren) && Right.is(tok::r_paren))
return Style.SpaceInEmptyParentheses;
if (Left.is(tok::l_paren) || Right.is(tok::r_paren))
- return (Right.Type == TT_CastRParen ||
- (Left.MatchingParen && Left.MatchingParen->Type == TT_CastRParen))
+ return (Right.is(TT_CastRParen) ||
+ (Left.MatchingParen && Left.MatchingParen->is(TT_CastRParen)))
? Style.SpacesInCStyleCastParentheses
: Style.SpacesInParentheses;
- if (Style.SpacesInAngles &&
- ((Left.Type == TT_TemplateOpener) != (Right.Type == TT_TemplateCloser)))
- return true;
if (Right.isOneOf(tok::semi, tok::comma))
return false;
if (Right.is(tok::less) &&
- (Left.is(tok::kw_template) ||
+ (Left.isOneOf(tok::kw_template, tok::r_paren) ||
(Line.Type == LT_ObjCDecl && Style.ObjCSpaceBeforeProtocolList)))
return true;
- if (Left.is(tok::arrow) || Right.is(tok::arrow))
- return false;
if (Left.isOneOf(tok::exclaim, tok::tilde))
return false;
if (Left.is(tok::at) &&
@@ -1494,69 +1648,72 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return false;
if (Left.is(tok::coloncolon))
return false;
- if (Right.is(tok::coloncolon) && Left.isNot(tok::l_brace))
- return (Left.is(tok::less) && Style.Standard == FormatStyle::LS_Cpp03) ||
- !Left.isOneOf(tok::identifier, tok::greater, tok::l_paren,
- tok::r_paren, tok::less);
if (Left.is(tok::less) || Right.isOneOf(tok::greater, tok::less))
return false;
if (Right.is(tok::ellipsis))
return Left.Tok.isLiteral();
if (Left.is(tok::l_square) && Right.is(tok::amp))
return false;
- if (Right.Type == TT_PointerOrReference)
+ if (Right.is(TT_PointerOrReference))
return Left.Tok.isLiteral() ||
- ((Left.Type != TT_PointerOrReference) && Left.isNot(tok::l_paren) &&
+ (!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
Style.PointerAlignment != FormatStyle::PAS_Left);
- if (Right.Type == TT_FunctionTypeLParen && Left.isNot(tok::l_paren) &&
- (Left.Type != TT_PointerOrReference || Style.PointerAlignment != FormatStyle::PAS_Right))
+ if (Right.is(TT_FunctionTypeLParen) && Left.isNot(tok::l_paren) &&
+ (!Left.is(TT_PointerOrReference) ||
+ Style.PointerAlignment != FormatStyle::PAS_Right))
return true;
- if (Left.Type == TT_PointerOrReference)
- return Right.Tok.isLiteral() || Right.Type == TT_BlockComment ||
- ((Right.Type != TT_PointerOrReference) &&
- Right.isNot(tok::l_paren) && Style.PointerAlignment != FormatStyle::PAS_Right &&
- Left.Previous &&
+ if (Left.is(TT_PointerOrReference))
+ return Right.Tok.isLiteral() || Right.is(TT_BlockComment) ||
+ (!Right.isOneOf(TT_PointerOrReference, tok::l_paren) &&
+ Style.PointerAlignment != FormatStyle::PAS_Right && Left.Previous &&
!Left.Previous->isOneOf(tok::l_paren, tok::coloncolon));
if (Right.is(tok::star) && Left.is(tok::l_paren))
return false;
if (Left.is(tok::l_square))
- return Left.Type == TT_ArrayInitializerLSquare &&
- Style.SpacesInContainerLiterals && Right.isNot(tok::r_square);
+ return (Left.is(TT_ArrayInitializerLSquare) &&
+ Style.SpacesInContainerLiterals && Right.isNot(tok::r_square)) ||
+ (Left.is(TT_ArraySubscriptLSquare) && Style.SpacesInSquareBrackets &&
+ Right.isNot(tok::r_square));
if (Right.is(tok::r_square))
- return Right.MatchingParen && Style.SpacesInContainerLiterals &&
- Right.MatchingParen->Type == TT_ArrayInitializerLSquare;
- if (Right.is(tok::l_square) && Right.Type != TT_ObjCMethodExpr &&
- Right.Type != TT_LambdaLSquare && Left.isNot(tok::numeric_constant) &&
- Left.Type != TT_DictLiteral)
+ return Right.MatchingParen &&
+ ((Style.SpacesInContainerLiterals &&
+ Right.MatchingParen->is(TT_ArrayInitializerLSquare)) ||
+ (Style.SpacesInSquareBrackets &&
+ Right.MatchingParen->is(TT_ArraySubscriptLSquare)));
+ if (Right.is(tok::l_square) &&
+ !Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare) &&
+ !Left.isOneOf(tok::numeric_constant, TT_DictLiteral))
return false;
if (Left.is(tok::colon))
- return Left.Type != TT_ObjCMethodExpr;
- if (Left.Type == TT_BlockComment)
+ return !Left.is(TT_ObjCMethodExpr);
+ if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
+ return !Left.Children.empty(); // No spaces in "{}".
+ if ((Left.is(tok::l_brace) && Left.BlockKind != BK_Block) ||
+ (Right.is(tok::r_brace) && Right.MatchingParen &&
+ Right.MatchingParen->BlockKind != BK_Block))
+ return !Style.Cpp11BracedListStyle;
+ if (Left.is(TT_BlockComment))
return !Left.TokenText.endswith("=*/");
if (Right.is(tok::l_paren)) {
- if (Left.is(tok::r_paren) && Left.Type == TT_AttributeParen)
+ if (Left.is(tok::r_paren) && Left.is(TT_AttributeParen))
return true;
- return Line.Type == LT_ObjCDecl ||
- Left.isOneOf(tok::kw_new, tok::kw_delete, tok::semi) ||
+ return Line.Type == LT_ObjCDecl || Left.is(tok::semi) ||
(Style.SpaceBeforeParens != FormatStyle::SBPO_Never &&
(Left.isOneOf(tok::kw_if, tok::kw_for, tok::kw_while,
- tok::kw_switch, tok::kw_catch, tok::kw_case) ||
+ tok::kw_switch, tok::kw_case) ||
+ (Left.isOneOf(tok::kw_try, tok::kw_catch, tok::kw_new,
+ tok::kw_delete) &&
+ (!Left.Previous || Left.Previous->isNot(tok::period))) ||
Left.IsForEachMacro)) ||
(Style.SpaceBeforeParens == FormatStyle::SBPO_Always &&
- Left.isOneOf(tok::identifier, tok::kw___attribute) &&
+ (Left.is(tok::identifier) || Left.isFunctionLikeKeyword()) &&
Line.Type != LT_PreprocessorDirective);
}
if (Left.is(tok::at) && Right.Tok.getObjCKeywordID() != tok::objc_not_keyword)
return false;
- if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
- return !Left.Children.empty(); // No spaces in "{}".
- if ((Left.is(tok::l_brace) && Left.BlockKind != BK_Block) ||
- (Right.is(tok::r_brace) && Right.MatchingParen &&
- Right.MatchingParen->BlockKind != BK_Block))
- return !Style.Cpp11BracedListStyle;
- if (Right.Type == TT_UnaryOperator)
+ if (Right.is(TT_UnaryOperator))
return !Left.isOneOf(tok::l_paren, tok::l_square, tok::at) &&
- (Left.isNot(tok::colon) || Left.Type != TT_ObjCMethodExpr);
+ (Left.isNot(tok::colon) || Left.isNot(TT_ObjCMethodExpr));
if ((Left.isOneOf(tok::identifier, tok::greater, tok::r_square,
tok::r_paren) ||
Left.isSimpleTypeSpecifier()) &&
@@ -1567,78 +1724,120 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return false;
if (Right.is(tok::hash) && Left.is(tok::identifier) && Left.TokenText == "L")
return false;
+ if (Left.is(TT_TemplateCloser) && Left.MatchingParen &&
+ Left.MatchingParen->Previous &&
+ Left.MatchingParen->Previous->is(tok::period))
+ // A.<B>DoSomething();
+ return false;
+ if (Left.is(TT_TemplateCloser) && Right.is(tok::l_square))
+ return false;
return true;
}
bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
- const FormatToken &Tok) {
- if (Tok.Tok.getIdentifierInfo() && Tok.Previous->Tok.getIdentifierInfo())
+ const FormatToken &Right) {
+ const FormatToken &Left = *Right.Previous;
+ if (Style.Language == FormatStyle::LK_Proto) {
+ if (Right.is(tok::period) &&
+ Left.isOneOf(Keywords.kw_optional, Keywords.kw_required,
+ Keywords.kw_repeated))
+ return true;
+ if (Right.is(tok::l_paren) &&
+ Left.isOneOf(Keywords.kw_returns, Keywords.kw_option))
+ return true;
+ } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Left.is(Keywords.kw_var))
+ return true;
+ } else if (Style.Language == FormatStyle::LK_Java) {
+ if (Left.is(tok::r_square) && Right.is(tok::l_brace))
+ return true;
+ if (Left.is(TT_LambdaArrow) || Right.is(TT_LambdaArrow))
+ return true;
+ if (Left.is(Keywords.kw_synchronized) && Right.is(tok::l_paren))
+ return Style.SpaceBeforeParens != FormatStyle::SBPO_Never;
+ if ((Left.isOneOf(tok::kw_static, tok::kw_public, tok::kw_private,
+ tok::kw_protected) ||
+ Left.isOneOf(Keywords.kw_final, Keywords.kw_abstract,
+ Keywords.kw_native)) &&
+ Right.is(TT_TemplateOpener))
+ return true;
+ }
+ if (Right.Tok.getIdentifierInfo() && Left.Tok.getIdentifierInfo())
return true; // Never ever merge two identifiers.
- if (Tok.Previous->Type == TT_ImplicitStringLiteral)
- return Tok.WhitespaceRange.getBegin() != Tok.WhitespaceRange.getEnd();
+ if (Left.is(TT_ImplicitStringLiteral))
+ return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
if (Line.Type == LT_ObjCMethodDecl) {
- if (Tok.Previous->Type == TT_ObjCMethodSpecifier)
+ if (Left.is(TT_ObjCMethodSpecifier))
return true;
- if (Tok.Previous->is(tok::r_paren) && Tok.is(tok::identifier))
+ if (Left.is(tok::r_paren) && Right.is(tok::identifier))
// Don't space between ')' and <id>
return false;
}
if (Line.Type == LT_ObjCProperty &&
- (Tok.is(tok::equal) || Tok.Previous->is(tok::equal)))
+ (Right.is(tok::equal) || Left.is(tok::equal)))
return false;
- if (Tok.Type == TT_TrailingReturnArrow ||
- Tok.Previous->Type == TT_TrailingReturnArrow)
+ if (Right.is(TT_TrailingReturnArrow) || Left.is(TT_TrailingReturnArrow))
return true;
- if (Tok.Previous->is(tok::comma))
+ if (Left.is(tok::comma))
return true;
- if (Tok.is(tok::comma))
+ if (Right.is(tok::comma))
return false;
- if (Tok.Type == TT_CtorInitializerColon || Tok.Type == TT_ObjCBlockLParen)
+ if (Right.isOneOf(TT_CtorInitializerColon, TT_ObjCBlockLParen))
return true;
- if (Tok.Previous->Tok.is(tok::kw_operator))
- return Tok.is(tok::coloncolon);
- if (Tok.Type == TT_OverloadedOperatorLParen)
+ if (Left.is(tok::kw_operator))
+ return Right.is(tok::coloncolon);
+ if (Right.is(TT_OverloadedOperatorLParen))
return false;
- if (Tok.is(tok::colon))
+ if (Right.is(tok::colon))
return !Line.First->isOneOf(tok::kw_case, tok::kw_default) &&
- Tok.getNextNonComment() && Tok.Type != TT_ObjCMethodExpr &&
- !Tok.Previous->is(tok::question) &&
- (Tok.Type != TT_DictLiteral || Style.SpacesInContainerLiterals);
- if (Tok.Previous->Type == TT_UnaryOperator ||
- Tok.Previous->Type == TT_CastRParen)
- return Tok.Type == TT_BinaryOperator;
- if (Tok.Previous->is(tok::greater) && Tok.is(tok::greater)) {
- return Tok.Type == TT_TemplateCloser &&
- Tok.Previous->Type == TT_TemplateCloser &&
+ Right.getNextNonComment() && Right.isNot(TT_ObjCMethodExpr) &&
+ !Left.is(tok::question) &&
+ !(Right.is(TT_InlineASMColon) && Left.is(tok::coloncolon)) &&
+ (Right.isNot(TT_DictLiteral) || Style.SpacesInContainerLiterals);
+ if (Left.is(TT_UnaryOperator))
+ return Right.is(TT_BinaryOperator);
+ if (Left.is(TT_CastRParen))
+ return Style.SpaceAfterCStyleCast || Right.is(TT_BinaryOperator);
+ if (Left.is(tok::greater) && Right.is(tok::greater)) {
+ return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) &&
(Style.Standard != FormatStyle::LS_Cpp11 || Style.SpacesInAngles);
}
- if (Tok.isOneOf(tok::arrowstar, tok::periodstar) ||
- Tok.Previous->isOneOf(tok::arrowstar, tok::periodstar))
+ if (Right.isOneOf(tok::arrow, tok::period, tok::arrowstar, tok::periodstar) ||
+ Left.isOneOf(tok::arrow, tok::period, tok::arrowstar, tok::periodstar))
return false;
if (!Style.SpaceBeforeAssignmentOperators &&
- Tok.getPrecedence() == prec::Assignment)
+ Right.getPrecedence() == prec::Assignment)
return false;
- if ((Tok.Type == TT_BinaryOperator && !Tok.Previous->is(tok::l_paren)) ||
- Tok.Previous->Type == TT_BinaryOperator ||
- Tok.Previous->Type == TT_ConditionalExpr)
+ if (Right.is(tok::coloncolon) && Left.isNot(tok::l_brace))
+ return (Left.is(TT_TemplateOpener) &&
+ Style.Standard == FormatStyle::LS_Cpp03) ||
+ !(Left.isOneOf(tok::identifier, tok::l_paren, tok::r_paren) ||
+ Left.isOneOf(TT_TemplateCloser, TT_TemplateOpener));
+ if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
+ return Style.SpacesInAngles;
+ if ((Right.is(TT_BinaryOperator) && !Left.is(tok::l_paren)) ||
+ Left.isOneOf(TT_BinaryOperator, TT_ConditionalExpr))
return true;
- if (Tok.Previous->Type == TT_TemplateCloser && Tok.is(tok::l_paren))
+ if (Left.is(TT_TemplateCloser) && Right.is(tok::l_paren))
+ return Style.SpaceBeforeParens == FormatStyle::SBPO_Always;
+ if (Right.is(TT_TemplateOpener) && Left.is(tok::r_paren) &&
+ Left.MatchingParen && Left.MatchingParen->is(TT_OverloadedOperatorLParen))
return false;
- if (Tok.is(tok::less) && Tok.Previous->isNot(tok::l_paren) &&
+ if (Right.is(tok::less) && Left.isNot(tok::l_paren) &&
Line.First->is(tok::hash))
return true;
- if (Tok.Type == TT_TrailingUnaryOperator)
+ if (Right.is(TT_TrailingUnaryOperator))
return false;
- if (Tok.Previous->Type == TT_RegexLiteral)
+ if (Left.is(TT_RegexLiteral))
return false;
- return spaceRequiredBetween(Line, *Tok.Previous, Tok);
+ return spaceRequiredBetween(Line, Left, Right);
}
// Returns 'true' if 'Tok' is a brace we'd want to break before in Allman style.
static bool isAllmanBrace(const FormatToken &Tok) {
return Tok.is(tok::l_brace) && Tok.BlockKind == BK_Block &&
- Tok.Type != TT_ObjCBlockLBrace && Tok.Type != TT_DictLiteral;
+ !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral);
}
bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
@@ -1646,54 +1845,66 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
const FormatToken &Left = *Right.Previous;
if (Right.NewlinesBefore > 1)
return true;
- if (Right.is(tok::comment)) {
- return Right.Previous->BlockKind != BK_BracedInit &&
- Right.Previous->Type != TT_CtorInitializerColon &&
+
+ // If the last token before a '}' is a comma or a trailing comment, the
+ // intention is to insert a line break after it in order to make shuffling
+ // around entries easier.
+ const FormatToken *BeforeClosingBrace = nullptr;
+ if (Left.is(tok::l_brace) && Left.BlockKind != BK_Block && Left.MatchingParen)
+ BeforeClosingBrace = Left.MatchingParen->Previous;
+ else if (Right.is(tok::r_brace) && Right.BlockKind != BK_Block)
+ BeforeClosingBrace = &Left;
+ if (BeforeClosingBrace && (BeforeClosingBrace->is(tok::comma) ||
+ BeforeClosingBrace->isTrailingComment()))
+ return true;
+
+ if (Right.is(tok::comment))
+ return Left.BlockKind != BK_BracedInit &&
+ Left.isNot(TT_CtorInitializerColon) &&
(Right.NewlinesBefore > 0 && Right.HasUnescapedNewline);
- } else if (Right.Previous->isTrailingComment() ||
- (Right.isStringLiteral() && Right.Previous->isStringLiteral())) {
+ if (Right.Previous->isTrailingComment() ||
+ (Right.isStringLiteral() && Right.Previous->isStringLiteral()))
return true;
- } else if (Right.Previous->IsUnterminatedLiteral) {
+ if (Right.Previous->IsUnterminatedLiteral)
return true;
- } else if (Right.is(tok::lessless) && Right.Next &&
- Right.Previous->is(tok::string_literal) &&
- Right.Next->is(tok::string_literal)) {
+ if (Right.is(tok::lessless) && Right.Next &&
+ Right.Previous->is(tok::string_literal) &&
+ Right.Next->is(tok::string_literal))
return true;
- } else if (Right.Previous->ClosesTemplateDeclaration &&
- Right.Previous->MatchingParen &&
- Right.Previous->MatchingParen->NestingLevel == 0 &&
- Style.AlwaysBreakTemplateDeclarations) {
+ if (Right.Previous->ClosesTemplateDeclaration &&
+ Right.Previous->MatchingParen &&
+ Right.Previous->MatchingParen->NestingLevel == 0 &&
+ Style.AlwaysBreakTemplateDeclarations)
return true;
- } else if ((Right.Type == TT_CtorInitializerComma ||
- Right.Type == TT_CtorInitializerColon) &&
- Style.BreakConstructorInitializersBeforeComma &&
- !Style.ConstructorInitializerAllOnOneLineOrOnePerLine) {
+ if ((Right.isOneOf(TT_CtorInitializerComma, TT_CtorInitializerColon)) &&
+ Style.BreakConstructorInitializersBeforeComma &&
+ !Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
return true;
- } else if (Right.is(tok::string_literal) &&
- Right.TokenText.startswith("R\"")) {
+ if (Right.is(tok::string_literal) && Right.TokenText.startswith("R\""))
// Raw string literals are special wrt. line breaks. The author has made a
// deliberate choice and might have aligned the contents of the string
// literal accordingly. Thus, we try to keep existing line breaks.
return Right.NewlinesBefore > 0;
- } else if (Right.Previous->is(tok::l_brace) && Right.NestingLevel == 1 &&
- Style.Language == FormatStyle::LK_Proto) {
- // Don't enums onto single lines in protocol buffers.
+ if (Right.Previous->is(tok::l_brace) && Right.NestingLevel == 1 &&
+ Style.Language == FormatStyle::LK_Proto)
+ // Don't put enums onto single lines in protocol buffers.
return true;
- } else if (isAllmanBrace(Left) || isAllmanBrace(Right)) {
+ if (Style.Language == FormatStyle::LK_JavaScript && Right.is(tok::r_brace) &&
+ Left.is(tok::l_brace) && !Left.Children.empty())
+ // Support AllowShortFunctionsOnASingleLine for JavaScript.
+ return Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_None ||
+ (Left.NestingLevel == 0 && Line.Level == 0 &&
+ Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_Inline);
+ if (isAllmanBrace(Left) || isAllmanBrace(Right))
return Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
Style.BreakBeforeBraces == FormatStyle::BS_GNU;
- }
-
- // If the last token before a '}' is a comma or a comment, the intention is to
- // insert a line break after it in order to make shuffling around entries
- // easier.
- const FormatToken *BeforeClosingBrace = nullptr;
- if (Left.is(tok::l_brace) && Left.MatchingParen)
- BeforeClosingBrace = Left.MatchingParen->Previous;
- else if (Right.is(tok::r_brace))
- BeforeClosingBrace = Right.Previous;
- if (BeforeClosingBrace &&
- BeforeClosingBrace->isOneOf(tok::comma, tok::comment))
+ if (Style.Language == FormatStyle::LK_Proto && Left.isNot(tok::l_brace) &&
+ Right.is(TT_SelectorName))
+ return true;
+ if (Left.is(TT_ObjCBlockLBrace) && !Style.AllowShortBlocksOnASingleLine)
+ return true;
+ if (Right.is(tok::lessless) && Left.is(tok::identifier) &&
+ Left.TokenText == "endl")
return true;
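The endl check above forces the chained '<<' that follows std::endl onto a new source line, matching the usual streaming layout. A small illustrative example:

    std::cout << "first line" << std::endl
              << "second line" << std::endl;  // '<<' after endl starts a new line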
if (Style.Language == FormatStyle::LK_JavaScript) {
@@ -1701,6 +1912,17 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Right.is(tok::char_constant) && Left.is(tok::plus) && Left.Previous &&
Left.Previous->is(tok::char_constant))
return true;
+ if (Left.is(TT_DictLiteral) && Left.is(tok::l_brace) &&
+ Left.NestingLevel == 0)
+ return true;
+ } else if (Style.Language == FormatStyle::LK_Java) {
+ if (Left.is(TT_LeadingJavaAnnotation) &&
+ Right.isNot(TT_LeadingJavaAnnotation) && Right.isNot(tok::l_paren) &&
+ Line.Last->is(tok::l_brace))
+ return true;
+ if (Right.is(tok::plus) && Left.is(tok::string_literal) && Right.Next &&
+ Right.Next->is(tok::string_literal))
+ return true;
}
return false;
@@ -1709,12 +1931,24 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
const FormatToken &Right) {
const FormatToken &Left = *Right.Previous;
+
+ if (Style.Language == FormatStyle::LK_Java) {
+ if (Left.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
+ Keywords.kw_implements))
+ return false;
+ if (Right.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
+ Keywords.kw_implements))
+ return true;
+ }
+
if (Left.is(tok::at))
return false;
if (Left.Tok.getObjCKeywordID() == tok::objc_interface)
return false;
- if (Right.Type == TT_StartOfName ||
- Right.Type == TT_FunctionDeclarationName || Right.is(tok::kw_operator))
+ if (Left.isOneOf(TT_JavaAnnotation, TT_LeadingJavaAnnotation))
+ return !Right.is(tok::l_paren);
+ if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
+ Right.is(tok::kw_operator))
return true;
if (Right.isTrailingComment())
// We rely on MustBreakBefore being set correctly here as we should not
@@ -1725,47 +1959,46 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return Left.BlockKind == BK_BracedInit;
if (Left.is(tok::question) && Right.is(tok::colon))
return false;
- if (Right.Type == TT_ConditionalExpr || Right.is(tok::question))
+ if (Right.is(TT_ConditionalExpr) || Right.is(tok::question))
return Style.BreakBeforeTernaryOperators;
- if (Left.Type == TT_ConditionalExpr || Left.is(tok::question))
+ if (Left.is(TT_ConditionalExpr) || Left.is(tok::question))
return !Style.BreakBeforeTernaryOperators;
- if (Right.Type == TT_InheritanceColon)
+ if (Right.is(TT_InheritanceColon))
return true;
- if (Right.is(tok::colon) && (Right.Type != TT_CtorInitializerColon &&
- Right.Type != TT_InlineASMColon))
+ if (Right.is(tok::colon) &&
+ !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon))
return false;
- if (Left.is(tok::colon) &&
- (Left.Type == TT_DictLiteral || Left.Type == TT_ObjCMethodExpr))
+ if (Left.is(tok::colon) && (Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)))
return true;
- if (Right.Type == TT_SelectorName)
+ if (Right.is(TT_SelectorName))
return true;
if (Left.is(tok::r_paren) && Line.Type == LT_ObjCProperty)
return true;
if (Left.ClosesTemplateDeclaration)
return true;
- if (Right.Type == TT_RangeBasedForLoopColon ||
- Right.Type == TT_OverloadedOperatorLParen ||
- Right.Type == TT_OverloadedOperator)
+ if (Right.isOneOf(TT_RangeBasedForLoopColon, TT_OverloadedOperatorLParen,
+ TT_OverloadedOperator))
return false;
- if (Left.Type == TT_RangeBasedForLoopColon)
+ if (Left.is(TT_RangeBasedForLoopColon))
return true;
- if (Right.Type == TT_RangeBasedForLoopColon)
+ if (Right.is(TT_RangeBasedForLoopColon))
return false;
- if (Left.Type == TT_PointerOrReference || Left.Type == TT_TemplateCloser ||
- Left.Type == TT_UnaryOperator || Left.is(tok::kw_operator))
+ if (Left.isOneOf(TT_PointerOrReference, TT_TemplateCloser,
+ TT_UnaryOperator) ||
+ Left.is(tok::kw_operator))
return false;
if (Left.is(tok::equal) && Line.Type == LT_VirtualFunctionDecl)
return false;
- if (Left.is(tok::l_paren) && Left.Type == TT_AttributeParen)
+ if (Left.is(tok::l_paren) && Left.is(TT_AttributeParen))
return false;
if (Left.is(tok::l_paren) && Left.Previous &&
- (Left.Previous->Type == TT_BinaryOperator ||
- Left.Previous->Type == TT_CastRParen || Left.Previous->is(tok::kw_if)))
+ (Left.Previous->isOneOf(TT_BinaryOperator, TT_CastRParen) ||
+ Left.Previous->is(tok::kw_if)))
return false;
- if (Right.Type == TT_ImplicitStringLiteral)
+ if (Right.is(TT_ImplicitStringLiteral))
return false;
- if (Right.is(tok::r_paren) || Right.Type == TT_TemplateCloser)
+ if (Right.is(tok::r_paren) || Right.is(TT_TemplateCloser))
return false;
// We only break before r_brace if there was a corresponding break before
@@ -1775,7 +2008,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// Allow breaking after a trailing annotation, e.g. after a method
// declaration.
- if (Left.Type == TT_TrailingAnnotation)
+ if (Left.is(TT_TrailingAnnotation))
return !Right.isOneOf(tok::l_brace, tok::semi, tok::equal, tok::l_paren,
tok::less, tok::coloncolon);
@@ -1785,29 +2018,35 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Left.is(tok::identifier) && Right.is(tok::string_literal))
return true;
- if (Right.is(tok::identifier) && Right.Next &&
- Right.Next->Type == TT_DictLiteral)
+ if (Right.is(tok::identifier) && Right.Next && Right.Next->is(TT_DictLiteral))
return true;
- if (Left.Type == TT_CtorInitializerComma &&
+ if (Left.is(TT_CtorInitializerComma) &&
Style.BreakConstructorInitializersBeforeComma)
return false;
- if (Right.Type == TT_CtorInitializerComma &&
+ if (Right.is(TT_CtorInitializerComma) &&
Style.BreakConstructorInitializersBeforeComma)
return true;
if (Left.is(tok::greater) && Right.is(tok::greater) &&
- Left.Type != TT_TemplateCloser)
+ Left.isNot(TT_TemplateCloser))
return false;
- if (Right.Type == TT_BinaryOperator && Style.BreakBeforeBinaryOperators)
+ if (Right.is(TT_BinaryOperator) &&
+ Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None &&
+ (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_All ||
+ Right.getPrecedence() != prec::Assignment))
+ return true;
+ if (Left.is(TT_ArrayInitializerLSquare))
+ return true;
+ if (Right.is(tok::kw_typename) && Left.isNot(tok::kw_const))
return true;
- if (Left.Type == TT_ArrayInitializerLSquare)
+ if (Left.isBinaryOperator() && !Left.isOneOf(tok::arrowstar, tok::lessless) &&
+ Style.BreakBeforeBinaryOperators != FormatStyle::BOS_All &&
+ (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None ||
+ Left.getPrecedence() == prec::Assignment))
return true;
- return (Left.isBinaryOperator() &&
- !Left.isOneOf(tok::arrowstar, tok::lessless) &&
- !Style.BreakBeforeBinaryOperators) ||
- Left.isOneOf(tok::comma, tok::coloncolon, tok::semi, tok::l_brace,
+ return Left.isOneOf(tok::comma, tok::coloncolon, tok::semi, tok::l_brace,
tok::kw_class, tok::kw_struct) ||
- Right.isMemberAccess() ||
+ Right.isMemberAccess() || Right.is(TT_TrailingReturnArrow) ||
Right.isOneOf(tok::lessless, tok::colon, tok::l_square, tok::at) ||
(Left.is(tok::r_paren) &&
Right.isOneOf(tok::identifier, tok::kw_const)) ||
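BreakBeforeBinaryOperators changes in this patch from a boolean to an enum (BOS_None and BOS_All appear above, plus a value for non-assignment operators). As an illustration of the two extremes, with placeholder operand names that do not fit on one line:

    // BreakBeforeBinaryOperators: None  (operator stays at the end of the line)
    int Sum = aLongOperandName +
              anotherLongOperandName;
    // BreakBeforeBinaryOperators: All   (operator starts the continuation line)
    int Sum2 = aLongOperandName
               + anotherLongOperandName;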
diff --git a/lib/Format/TokenAnnotator.h b/lib/Format/TokenAnnotator.h
index 36de010fc940..ff8e32a56afc 100644
--- a/lib/Format/TokenAnnotator.h
+++ b/lib/Format/TokenAnnotator.h
@@ -13,8 +13,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FORMAT_TOKEN_ANNOTATOR_H
-#define LLVM_CLANG_FORMAT_TOKEN_ANNOTATOR_H
+#ifndef LLVM_CLANG_LIB_FORMAT_TOKENANNOTATOR_H
+#define LLVM_CLANG_LIB_FORMAT_TOKENANNOTATOR_H
#include "UnwrappedLineParser.h"
#include "clang/Format/Format.h"
@@ -27,12 +27,13 @@ namespace format {
enum LineType {
LT_Invalid,
- LT_Other,
- LT_PreprocessorDirective,
- LT_VirtualFunctionDecl,
+ LT_ImportStatement,
LT_ObjCDecl, // An @interface, @implementation, or @protocol line.
LT_ObjCMethodDecl,
- LT_ObjCProperty // An @property line.
+ LT_ObjCProperty, // An @property line.
+ LT_Other,
+ LT_PreprocessorDirective,
+ LT_VirtualFunctionDecl
};
class AnnotatedLine {
@@ -108,8 +109,8 @@ private:
/// \c UnwrappedLine.
class TokenAnnotator {
public:
- TokenAnnotator(const FormatStyle &Style, IdentifierInfo &Ident_in)
- : Style(Style), Ident_in(Ident_in) {}
+ TokenAnnotator(const FormatStyle &Style, const AdditionalKeywords &Keywords)
+ : Style(Style), Keywords(Keywords) {}
/// \brief Adapts the indent levels of comment lines to the indent of the
/// subsequent line.
@@ -139,11 +140,10 @@ private:
const FormatStyle &Style;
- // Contextual keywords:
- IdentifierInfo &Ident_in;
+ const AdditionalKeywords &Keywords;
};
} // end namespace format
} // end namespace clang
-#endif // LLVM_CLANG_FORMAT_TOKEN_ANNOTATOR_H
+#endif
diff --git a/lib/Format/UnwrappedLineFormatter.cpp b/lib/Format/UnwrappedLineFormatter.cpp
new file mode 100644
index 000000000000..ca66e7351641
--- /dev/null
+++ b/lib/Format/UnwrappedLineFormatter.cpp
@@ -0,0 +1,706 @@
+//===--- UnwrappedLineFormatter.cpp - Format C++ code ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "UnwrappedLineFormatter.h"
+#include "WhitespaceManager.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "format-formatter"
+
+namespace clang {
+namespace format {
+
+namespace {
+
+bool startsExternCBlock(const AnnotatedLine &Line) {
+ const FormatToken *Next = Line.First->getNextNonComment();
+ const FormatToken *NextNext = Next ? Next->getNextNonComment() : nullptr;
+ return Line.First->is(tok::kw_extern) && Next && Next->isStringLiteral() &&
+ NextNext && NextNext->is(tok::l_brace);
+}
+
+class LineJoiner {
+public:
+ LineJoiner(const FormatStyle &Style) : Style(Style) {}
+
+ /// \brief Calculates how many lines can be merged into 1 starting at \p I.
+ unsigned
+ tryFitMultipleLinesInOne(unsigned Indent,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E) {
+ // We can never merge stuff if there are trailing line comments.
+ const AnnotatedLine *TheLine = *I;
+ if (TheLine->Last->is(TT_LineComment))
+ return 0;
+
+ if (Style.ColumnLimit > 0 && Indent > Style.ColumnLimit)
+ return 0;
+
+ unsigned Limit =
+ Style.ColumnLimit == 0 ? UINT_MAX : Style.ColumnLimit - Indent;
+ // If we already exceed the column limit, we set 'Limit' to 0. The different
+ // tryMerge..() functions can then decide whether to still do merging.
+ Limit = TheLine->Last->TotalLength > Limit
+ ? 0
+ : Limit - TheLine->Last->TotalLength;
+
+ if (I + 1 == E || I[1]->Type == LT_Invalid || I[1]->First->MustBreakBefore)
+ return 0;
+
+ // FIXME: TheLine->Level != 0 might or might not be the right check to do.
+ // If necessary, change to something smarter.
+ bool MergeShortFunctions =
+ Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_All ||
+ (Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_Empty &&
+ I[1]->First->is(tok::r_brace)) ||
+ (Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_Inline &&
+ TheLine->Level != 0);
+
+ if (TheLine->Last->is(TT_FunctionLBrace) &&
+ TheLine->First != TheLine->Last) {
+ return MergeShortFunctions ? tryMergeSimpleBlock(I, E, Limit) : 0;
+ }
+ if (TheLine->Last->is(tok::l_brace)) {
+ return Style.BreakBeforeBraces == FormatStyle::BS_Attach
+ ? tryMergeSimpleBlock(I, E, Limit)
+ : 0;
+ }
+ if (I[1]->First->is(TT_FunctionLBrace) &&
+ Style.BreakBeforeBraces != FormatStyle::BS_Attach) {
+ if (I[1]->Last->is(TT_LineComment))
+ return 0;
+
+ // Check for Limit <= 2 to account for the " {".
+ if (Limit <= 2 || (Style.ColumnLimit == 0 && containsMustBreak(TheLine)))
+ return 0;
+ Limit -= 2;
+
+ unsigned MergedLines = 0;
+ if (MergeShortFunctions) {
+ MergedLines = tryMergeSimpleBlock(I + 1, E, Limit);
+ // If we managed to merge the block, count the function header, which is
+ // on a separate line.
+ if (MergedLines > 0)
+ ++MergedLines;
+ }
+ return MergedLines;
+ }
+ if (TheLine->First->is(tok::kw_if)) {
+ return Style.AllowShortIfStatementsOnASingleLine
+ ? tryMergeSimpleControlStatement(I, E, Limit)
+ : 0;
+ }
+ if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while)) {
+ return Style.AllowShortLoopsOnASingleLine
+ ? tryMergeSimpleControlStatement(I, E, Limit)
+ : 0;
+ }
+ if (TheLine->First->isOneOf(tok::kw_case, tok::kw_default)) {
+ return Style.AllowShortCaseLabelsOnASingleLine
+ ? tryMergeShortCaseLabels(I, E, Limit)
+ : 0;
+ }
+ if (TheLine->InPPDirective &&
+ (TheLine->First->HasUnescapedNewline || TheLine->First->IsFirst)) {
+ return tryMergeSimplePPDirective(I, E, Limit);
+ }
+ return 0;
+ }
+
+private:
+ unsigned
+ tryMergeSimplePPDirective(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E,
+ unsigned Limit) {
+ if (Limit == 0)
+ return 0;
+ if (!I[1]->InPPDirective || I[1]->First->HasUnescapedNewline)
+ return 0;
+ if (I + 2 != E && I[2]->InPPDirective && !I[2]->First->HasUnescapedNewline)
+ return 0;
+ if (1 + I[1]->Last->TotalLength > Limit)
+ return 0;
+ return 1;
+ }
+
+ unsigned tryMergeSimpleControlStatement(
+ SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E, unsigned Limit) {
+ if (Limit == 0)
+ return 0;
+ if ((Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
+ Style.BreakBeforeBraces == FormatStyle::BS_GNU) &&
+ (I[1]->First->is(tok::l_brace) && !Style.AllowShortBlocksOnASingleLine))
+ return 0;
+ if (I[1]->InPPDirective != (*I)->InPPDirective ||
+ (I[1]->InPPDirective && I[1]->First->HasUnescapedNewline))
+ return 0;
+ Limit = limitConsideringMacros(I + 1, E, Limit);
+ AnnotatedLine &Line = **I;
+ if (Line.Last->isNot(tok::r_paren))
+ return 0;
+ if (1 + I[1]->Last->TotalLength > Limit)
+ return 0;
+ if (I[1]->First->isOneOf(tok::semi, tok::kw_if, tok::kw_for,
+ tok::kw_while, TT_LineComment))
+ return 0;
+ // Only inline simple if's (no nested if or else).
+ if (I + 2 != E && Line.First->is(tok::kw_if) &&
+ I[2]->First->is(tok::kw_else))
+ return 0;
+ return 1;
+ }
+
+ unsigned tryMergeShortCaseLabels(
+ SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E, unsigned Limit) {
+ if (Limit == 0 || I + 1 == E ||
+ I[1]->First->isOneOf(tok::kw_case, tok::kw_default))
+ return 0;
+ unsigned NumStmts = 0;
+ unsigned Length = 0;
+ bool InPPDirective = I[0]->InPPDirective;
+ for (; NumStmts < 3; ++NumStmts) {
+ if (I + 1 + NumStmts == E)
+ break;
+ const AnnotatedLine *Line = I[1 + NumStmts];
+ if (Line->InPPDirective != InPPDirective)
+ break;
+ if (Line->First->isOneOf(tok::kw_case, tok::kw_default, tok::r_brace))
+ break;
+ if (Line->First->isOneOf(tok::kw_if, tok::kw_for, tok::kw_switch,
+ tok::kw_while, tok::comment))
+ return 0;
+ Length += I[1 + NumStmts]->Last->TotalLength + 1; // 1 for the space.
+ }
+ if (NumStmts == 0 || NumStmts == 3 || Length > Limit)
+ return 0;
+ return NumStmts;
+ }
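tryMergeShortCaseLabels backs the AllowShortCaseLabelsOnASingleLine option: one or two short statements following a label may be pulled onto the label's line. A sketch of the resulting layout (illustrative only, Value and Count are placeholders):

    switch (Value) {
    case 0: return "zero";
    case 1: Count += 1; break;
    default: break;
    }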
+
+ unsigned
+ tryMergeSimpleBlock(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E,
+ unsigned Limit) {
+ AnnotatedLine &Line = **I;
+
+ // Don't merge ObjC @ keywords and methods.
+ if (Style.Language != FormatStyle::LK_Java &&
+ Line.First->isOneOf(tok::at, tok::minus, tok::plus))
+ return 0;
+
+    // Check that the current line allows merging. This depends on whether we
+    // are in a control flow statement as well as on several style flags.
+ if (Line.First->isOneOf(tok::kw_else, tok::kw_case))
+ return 0;
+ if (Line.First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_do, tok::kw_try,
+ tok::kw_catch, tok::kw_for, tok::r_brace)) {
+ if (!Style.AllowShortBlocksOnASingleLine)
+ return 0;
+ if (!Style.AllowShortIfStatementsOnASingleLine &&
+ Line.First->is(tok::kw_if))
+ return 0;
+ if (!Style.AllowShortLoopsOnASingleLine &&
+ Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for))
+ return 0;
+ // FIXME: Consider an option to allow short exception handling clauses on
+ // a single line.
+ if (Line.First->isOneOf(tok::kw_try, tok::kw_catch))
+ return 0;
+ }
+
+ FormatToken *Tok = I[1]->First;
+ if (Tok->is(tok::r_brace) && !Tok->MustBreakBefore &&
+ (Tok->getNextNonComment() == nullptr ||
+ Tok->getNextNonComment()->is(tok::semi))) {
+ // We merge empty blocks even if the line exceeds the column limit.
+ Tok->SpacesRequiredBefore = 0;
+ Tok->CanBreakBefore = true;
+ return 1;
+ } else if (Limit != 0 && Line.First->isNot(tok::kw_namespace) &&
+ !startsExternCBlock(Line)) {
+ // We don't merge short records.
+ if (Line.First->isOneOf(tok::kw_class, tok::kw_union, tok::kw_struct))
+ return 0;
+
+ // Check that we still have three lines and they fit into the limit.
+ if (I + 2 == E || I[2]->Type == LT_Invalid)
+ return 0;
+ Limit = limitConsideringMacros(I + 2, E, Limit);
+
+ if (!nextTwoLinesFitInto(I, Limit))
+ return 0;
+
+ // Second, check that the next line does not contain any braces - if it
+ // does, readability declines when putting it into a single line.
+ if (I[1]->Last->is(TT_LineComment))
+ return 0;
+ do {
+ if (Tok->is(tok::l_brace) && Tok->BlockKind != BK_BracedInit)
+ return 0;
+ Tok = Tok->Next;
+ } while (Tok);
+
+ // Last, check that the third line starts with a closing brace.
+ Tok = I[2]->First;
+ if (Tok->isNot(tok::r_brace))
+ return 0;
+
+ return 2;
+ }
+ return 0;
+ }
+
+ /// Returns the modified column limit for \p I if it is inside a macro and
+ /// needs a trailing '\'.
+ unsigned
+ limitConsideringMacros(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E,
+ unsigned Limit) {
+ if (I[0]->InPPDirective && I + 1 != E &&
+ !I[1]->First->HasUnescapedNewline && !I[1]->First->is(tok::eof)) {
+ return Limit < 2 ? 0 : Limit - 2;
+ }
+ return Limit;
+ }
+
+ bool nextTwoLinesFitInto(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ unsigned Limit) {
+ if (I[1]->First->MustBreakBefore || I[2]->First->MustBreakBefore)
+ return false;
+ return 1 + I[1]->Last->TotalLength + 1 + I[2]->Last->TotalLength <= Limit;
+ }
+
+ bool containsMustBreak(const AnnotatedLine *Line) {
+ for (const FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
+ if (Tok->MustBreakBefore)
+ return true;
+ }
+ return false;
+ }
+
+ const FormatStyle &Style;
+};
+
+class NoColumnLimitFormatter {
+public:
+ NoColumnLimitFormatter(ContinuationIndenter *Indenter) : Indenter(Indenter) {}
+
+ /// \brief Formats the line starting at \p State, simply keeping all of the
+ /// input's line breaking decisions.
+ void format(unsigned FirstIndent, const AnnotatedLine *Line) {
+ LineState State =
+ Indenter->getInitialState(FirstIndent, Line, /*DryRun=*/false);
+ while (State.NextToken) {
+ bool Newline =
+ Indenter->mustBreak(State) ||
+ (Indenter->canBreak(State) && State.NextToken->NewlinesBefore > 0);
+ Indenter->addTokenToState(State, Newline, /*DryRun=*/false);
+ }
+ }
+
+private:
+ ContinuationIndenter *Indenter;
+};
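NoColumnLimitFormatter is used when ColumnLimit is 0: it takes a break wherever the input already had one (and a break is allowed), and otherwise keeps tokens on the same line. Roughly, with placeholder names:

    // With ColumnLimit: 0 the author's breaks are kept as-is:
    int Total = firstContribution +
                secondContribution;  // stays split because the input was split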
+
+} // namespace
+
+unsigned
+UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
+ bool DryRun, int AdditionalIndent,
+ bool FixBadIndentation) {
+ LineJoiner Joiner(Style);
+
+ // Try to look up already computed penalty in DryRun-mode.
+ std::pair<const SmallVectorImpl<AnnotatedLine *> *, unsigned> CacheKey(
+ &Lines, AdditionalIndent);
+ auto CacheIt = PenaltyCache.find(CacheKey);
+ if (DryRun && CacheIt != PenaltyCache.end())
+ return CacheIt->second;
+
+ assert(!Lines.empty());
+ unsigned Penalty = 0;
+ std::vector<int> IndentForLevel;
+ for (unsigned i = 0, e = Lines[0]->Level; i != e; ++i)
+ IndentForLevel.push_back(Style.IndentWidth * i + AdditionalIndent);
+ const AnnotatedLine *PreviousLine = nullptr;
+ for (SmallVectorImpl<AnnotatedLine *>::const_iterator I = Lines.begin(),
+ E = Lines.end();
+ I != E; ++I) {
+ const AnnotatedLine &TheLine = **I;
+ const FormatToken *FirstTok = TheLine.First;
+ int Offset = getIndentOffset(*FirstTok);
+
+ // Determine indent and try to merge multiple unwrapped lines.
+ unsigned Indent;
+ if (TheLine.InPPDirective) {
+ Indent = TheLine.Level * Style.IndentWidth;
+ } else {
+ while (IndentForLevel.size() <= TheLine.Level)
+ IndentForLevel.push_back(-1);
+ IndentForLevel.resize(TheLine.Level + 1);
+ Indent = getIndent(IndentForLevel, TheLine.Level);
+ }
+ unsigned LevelIndent = Indent;
+ if (static_cast<int>(Indent) + Offset >= 0)
+ Indent += Offset;
+
+ // Merge multiple lines if possible.
+ unsigned MergedLines = Joiner.tryFitMultipleLinesInOne(Indent, I, E);
+ if (MergedLines > 0 && Style.ColumnLimit == 0) {
+ // Disallow line merging if there is a break at the start of one of the
+ // input lines.
+ for (unsigned i = 0; i < MergedLines; ++i) {
+ if (I[i + 1]->First->NewlinesBefore > 0)
+ MergedLines = 0;
+ }
+ }
+ if (!DryRun) {
+ for (unsigned i = 0; i < MergedLines; ++i) {
+ join(*I[i], *I[i + 1]);
+ }
+ }
+ I += MergedLines;
+
+ bool FixIndentation =
+ FixBadIndentation && (LevelIndent != FirstTok->OriginalColumn);
+ if (TheLine.First->is(tok::eof)) {
+ if (PreviousLine && PreviousLine->Affected && !DryRun) {
+ // Remove the file's trailing whitespace.
+ unsigned Newlines = std::min(FirstTok->NewlinesBefore, 1u);
+ Whitespaces->replaceWhitespace(*TheLine.First, Newlines,
+ /*IndentLevel=*/0, /*Spaces=*/0,
+ /*TargetColumn=*/0);
+ }
+ } else if (TheLine.Type != LT_Invalid &&
+ (TheLine.Affected || FixIndentation)) {
+ if (FirstTok->WhitespaceRange.isValid()) {
+ if (!DryRun)
+ formatFirstToken(*TheLine.First, PreviousLine, TheLine.Level, Indent,
+ TheLine.InPPDirective);
+ } else {
+ Indent = LevelIndent = FirstTok->OriginalColumn;
+ }
+
+ // If everything fits on a single line, just put it there.
+ unsigned ColumnLimit = Style.ColumnLimit;
+ if (I + 1 != E) {
+ AnnotatedLine *NextLine = I[1];
+ if (NextLine->InPPDirective && !NextLine->First->HasUnescapedNewline)
+ ColumnLimit = getColumnLimit(TheLine.InPPDirective);
+ }
+
+ if (TheLine.Last->TotalLength + Indent <= ColumnLimit ||
+ TheLine.Type == LT_ImportStatement) {
+ LineState State = Indenter->getInitialState(Indent, &TheLine, DryRun);
+ while (State.NextToken) {
+ formatChildren(State, /*Newline=*/false, /*DryRun=*/false, Penalty);
+ Indenter->addTokenToState(State, /*Newline=*/false, DryRun);
+ }
+ } else if (Style.ColumnLimit == 0) {
+ // FIXME: Implement nested blocks for ColumnLimit = 0.
+ NoColumnLimitFormatter Formatter(Indenter);
+ if (!DryRun)
+ Formatter.format(Indent, &TheLine);
+ } else {
+ Penalty += format(TheLine, Indent, DryRun);
+ }
+
+ if (!TheLine.InPPDirective)
+ IndentForLevel[TheLine.Level] = LevelIndent;
+ } else if (TheLine.ChildrenAffected) {
+ format(TheLine.Children, DryRun);
+ } else {
+ // Format the first token if necessary, and notify the WhitespaceManager
+ // about the unchanged whitespace.
+ for (FormatToken *Tok = TheLine.First; Tok; Tok = Tok->Next) {
+ if (Tok == TheLine.First && (Tok->NewlinesBefore > 0 || Tok->IsFirst)) {
+ unsigned LevelIndent = Tok->OriginalColumn;
+ if (!DryRun) {
+ // Remove trailing whitespace of the previous line.
+ if ((PreviousLine && PreviousLine->Affected) ||
+ TheLine.LeadingEmptyLinesAffected) {
+ formatFirstToken(*Tok, PreviousLine, TheLine.Level, LevelIndent,
+ TheLine.InPPDirective);
+ } else {
+ Whitespaces->addUntouchableToken(*Tok, TheLine.InPPDirective);
+ }
+ }
+
+ if (static_cast<int>(LevelIndent) - Offset >= 0)
+ LevelIndent -= Offset;
+ if (Tok->isNot(tok::comment) && !TheLine.InPPDirective)
+ IndentForLevel[TheLine.Level] = LevelIndent;
+ } else if (!DryRun) {
+ Whitespaces->addUntouchableToken(*Tok, TheLine.InPPDirective);
+ }
+ }
+ }
+ if (!DryRun) {
+ for (FormatToken *Tok = TheLine.First; Tok; Tok = Tok->Next) {
+ Tok->Finalized = true;
+ }
+ }
+ PreviousLine = *I;
+ }
+ PenaltyCache[CacheKey] = Penalty;
+ return Penalty;
+}
+
+unsigned UnwrappedLineFormatter::format(const AnnotatedLine &Line,
+ unsigned FirstIndent, bool DryRun) {
+ LineState State = Indenter->getInitialState(FirstIndent, &Line, DryRun);
+
+ // If the ObjC method declaration does not fit on a line, we should format
+ // it with one arg per line.
+ if (State.Line->Type == LT_ObjCMethodDecl)
+ State.Stack.back().BreakBeforeParameter = true;
+
+ // Find best solution in solution space.
+ return analyzeSolutionSpace(State, DryRun);
+}
+
+void UnwrappedLineFormatter::formatFirstToken(FormatToken &RootToken,
+ const AnnotatedLine *PreviousLine,
+ unsigned IndentLevel,
+ unsigned Indent,
+ bool InPPDirective) {
+ unsigned Newlines =
+ std::min(RootToken.NewlinesBefore, Style.MaxEmptyLinesToKeep + 1);
+ // Remove empty lines before "}" where applicable.
+ if (RootToken.is(tok::r_brace) &&
+ (!RootToken.Next ||
+ (RootToken.Next->is(tok::semi) && !RootToken.Next->Next)))
+ Newlines = std::min(Newlines, 1u);
+ if (Newlines == 0 && !RootToken.IsFirst)
+ Newlines = 1;
+ if (RootToken.IsFirst && !RootToken.HasUnescapedNewline)
+ Newlines = 0;
+
+ // Remove empty lines after "{".
+ if (!Style.KeepEmptyLinesAtTheStartOfBlocks && PreviousLine &&
+ PreviousLine->Last->is(tok::l_brace) &&
+ PreviousLine->First->isNot(tok::kw_namespace) &&
+ !startsExternCBlock(*PreviousLine))
+ Newlines = 1;
+
+ // Insert extra new line before access specifiers.
+ if (PreviousLine && PreviousLine->Last->isOneOf(tok::semi, tok::r_brace) &&
+ RootToken.isAccessSpecifier() && RootToken.NewlinesBefore == 1)
+ ++Newlines;
+
+ // Remove empty lines after access specifiers.
+ if (PreviousLine && PreviousLine->First->isAccessSpecifier())
+ Newlines = std::min(1u, Newlines);
+
+ Whitespaces->replaceWhitespace(RootToken, Newlines, IndentLevel, Indent,
+ Indent, InPPDirective &&
+ !RootToken.HasUnescapedNewline);
+}
+
+/// \brief Get the indent of \p Level from \p IndentForLevel.
+///
+/// \p IndentForLevel must contain the indent for the level \c l
+/// at \p IndentForLevel[l], or a value < 0 if the indent for
+/// that level is unknown.
+unsigned UnwrappedLineFormatter::getIndent(ArrayRef<int> IndentForLevel,
+ unsigned Level) {
+ if (IndentForLevel[Level] != -1)
+ return IndentForLevel[Level];
+ if (Level == 0)
+ return 0;
+ return getIndent(IndentForLevel, Level - 1) + Style.IndentWidth;
+}
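A short standalone sketch of the same fallback, with hypothetical values, for illustration only: an unknown level (-1) inherits from the nearest known lower level plus IndentWidth.

    #include <vector>
    static unsigned getIndentSketch(const std::vector<int> &IndentForLevel,
                                    unsigned Level, unsigned IndentWidth) {
      if (IndentForLevel[Level] != -1)
        return IndentForLevel[Level];
      if (Level == 0)
        return 0;
      return getIndentSketch(IndentForLevel, Level - 1, IndentWidth) + IndentWidth;
    }
    // getIndentSketch({0, 4, -1}, /*Level=*/2, /*IndentWidth=*/2) == 6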
+
+void UnwrappedLineFormatter::join(AnnotatedLine &A, const AnnotatedLine &B) {
+ assert(!A.Last->Next);
+ assert(!B.First->Previous);
+ if (B.Affected)
+ A.Affected = true;
+ A.Last->Next = B.First;
+ B.First->Previous = A.Last;
+ B.First->CanBreakBefore = true;
+ unsigned LengthA = A.Last->TotalLength + B.First->SpacesRequiredBefore;
+ for (FormatToken *Tok = B.First; Tok; Tok = Tok->Next) {
+ Tok->TotalLength += LengthA;
+ A.Last = Tok;
+ }
+}
+
+unsigned UnwrappedLineFormatter::analyzeSolutionSpace(LineState &InitialState,
+ bool DryRun) {
+ std::set<LineState *, CompareLineStatePointers> Seen;
+
+ // Increasing count of \c StateNode items we have created. This is used to
+ // create a deterministic order independent of the container.
+ unsigned Count = 0;
+ QueueType Queue;
+
+ // Insert start element into queue.
+ StateNode *Node =
+ new (Allocator.Allocate()) StateNode(InitialState, false, nullptr);
+ Queue.push(QueueItem(OrderedPenalty(0, Count), Node));
+ ++Count;
+
+ unsigned Penalty = 0;
+
+ // While not empty, take first element and follow edges.
+ while (!Queue.empty()) {
+ Penalty = Queue.top().first.first;
+ StateNode *Node = Queue.top().second;
+ if (!Node->State.NextToken) {
+ DEBUG(llvm::dbgs() << "\n---\nPenalty for line: " << Penalty << "\n");
+ break;
+ }
+ Queue.pop();
+
+ // Cut off the analysis of certain solutions if the analysis gets too
+ // complex. See description of IgnoreStackForComparison.
+ if (Count > 10000)
+ Node->State.IgnoreStackForComparison = true;
+
+ if (!Seen.insert(&Node->State).second)
+ // State already examined with lower penalty.
+ continue;
+
+ FormatDecision LastFormat = Node->State.NextToken->Decision;
+ if (LastFormat == FD_Unformatted || LastFormat == FD_Continue)
+ addNextStateToQueue(Penalty, Node, /*NewLine=*/false, &Count, &Queue);
+ if (LastFormat == FD_Unformatted || LastFormat == FD_Break)
+ addNextStateToQueue(Penalty, Node, /*NewLine=*/true, &Count, &Queue);
+ }
+
+ if (Queue.empty()) {
+ // We were unable to find a solution, do nothing.
+ // FIXME: Add diagnostic?
+ DEBUG(llvm::dbgs() << "Could not find a solution.\n");
+ return 0;
+ }
+
+ // Reconstruct the solution.
+ if (!DryRun)
+ reconstructPath(InitialState, Queue.top().second);
+
+ DEBUG(llvm::dbgs() << "Total number of analyzed states: " << Count << "\n");
+ DEBUG(llvm::dbgs() << "---\n");
+
+ return Penalty;
+}
+
+#ifndef NDEBUG
+static void printLineState(const LineState &State) {
+ llvm::dbgs() << "State: ";
+ for (const ParenState &P : State.Stack) {
+ llvm::dbgs() << P.Indent << "|" << P.LastSpace << "|" << P.NestedBlockIndent
+ << " ";
+ }
+ llvm::dbgs() << State.NextToken->TokenText << "\n";
+}
+#endif
+
+void UnwrappedLineFormatter::reconstructPath(LineState &State,
+ StateNode *Current) {
+ std::deque<StateNode *> Path;
+ // We do not need a break before the initial token.
+ while (Current->Previous) {
+ Path.push_front(Current);
+ Current = Current->Previous;
+ }
+ for (std::deque<StateNode *>::iterator I = Path.begin(), E = Path.end();
+ I != E; ++I) {
+ unsigned Penalty = 0;
+ formatChildren(State, (*I)->NewLine, /*DryRun=*/false, Penalty);
+ Penalty += Indenter->addTokenToState(State, (*I)->NewLine, false);
+
+ DEBUG({
+ printLineState((*I)->Previous->State);
+ if ((*I)->NewLine) {
+ llvm::dbgs() << "Penalty for placing "
+ << (*I)->Previous->State.NextToken->Tok.getName() << ": "
+ << Penalty << "\n";
+ }
+ });
+ }
+}
+
+void UnwrappedLineFormatter::addNextStateToQueue(unsigned Penalty,
+ StateNode *PreviousNode,
+ bool NewLine, unsigned *Count,
+ QueueType *Queue) {
+ if (NewLine && !Indenter->canBreak(PreviousNode->State))
+ return;
+ if (!NewLine && Indenter->mustBreak(PreviousNode->State))
+ return;
+
+ StateNode *Node = new (Allocator.Allocate())
+ StateNode(PreviousNode->State, NewLine, PreviousNode);
+ if (!formatChildren(Node->State, NewLine, /*DryRun=*/true, Penalty))
+ return;
+
+ Penalty += Indenter->addTokenToState(Node->State, NewLine, true);
+
+ Queue->push(QueueItem(OrderedPenalty(Penalty, *Count), Node));
+ ++(*Count);
+}
+
+bool UnwrappedLineFormatter::formatChildren(LineState &State, bool NewLine,
+ bool DryRun, unsigned &Penalty) {
+ FormatToken &Previous = *State.NextToken->Previous;
+ const FormatToken *LBrace = State.NextToken->getPreviousNonComment();
+ if (!LBrace || LBrace->isNot(tok::l_brace) || LBrace->BlockKind != BK_Block ||
+ Previous.Children.size() == 0)
+ // The previous token does not open a block. Nothing to do. We don't
+ // assert so that we can simply call this function for all tokens.
+ return true;
+
+ if (NewLine) {
+ int AdditionalIndent = State.Stack.back().Indent -
+ Previous.Children[0]->Level * Style.IndentWidth;
+
+ Penalty += format(Previous.Children, DryRun, AdditionalIndent,
+ /*FixBadIndentation=*/true);
+ return true;
+ }
+
+ if (Previous.Children[0]->First->MustBreakBefore)
+ return false;
+
+ // Cannot merge multiple statements into a single line.
+ if (Previous.Children.size() > 1)
+ return false;
+
+ // Cannot merge into one line if this line ends on a comment.
+ if (Previous.is(tok::comment))
+ return false;
+
+ // We can't put the closing "}" on a line with a trailing comment.
+ if (Previous.Children[0]->Last->isTrailingComment())
+ return false;
+
+ // If the child line exceeds the column limit, we wouldn't want to merge it.
+ // We add +2 for the trailing " }".
+ if (Style.ColumnLimit > 0 &&
+ Previous.Children[0]->Last->TotalLength + State.Column + 2 >
+ Style.ColumnLimit)
+ return false;
+
+ if (!DryRun) {
+ Whitespaces->replaceWhitespace(
+ *Previous.Children[0]->First,
+ /*Newlines=*/0, /*IndentLevel=*/0, /*Spaces=*/1,
+ /*StartOfTokenColumn=*/State.Column, State.Line->InPPDirective);
+ }
+ Penalty += format(*Previous.Children[0], State.Column + 1, DryRun);
+
+ State.Column += 1 + Previous.Children[0]->Last->TotalLength;
+ return true;
+}
+
+} // namespace format
+} // namespace clang
diff --git a/lib/Format/UnwrappedLineFormatter.h b/lib/Format/UnwrappedLineFormatter.h
new file mode 100644
index 000000000000..3ae6dbc4db0b
--- /dev/null
+++ b/lib/Format/UnwrappedLineFormatter.h
@@ -0,0 +1,168 @@
+//===--- UnwrappedLineFormatter.h - Format C++ code -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Implements a combinatorial exploration of all the different
+/// line breaks unwrapped lines can be formatted in.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEFORMATTER_H
+#define LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEFORMATTER_H
+
+#include "ContinuationIndenter.h"
+#include "clang/Format/Format.h"
+#include <map>
+#include <queue>
+#include <string>
+
+namespace clang {
+namespace format {
+
+class ContinuationIndenter;
+class WhitespaceManager;
+
+class UnwrappedLineFormatter {
+public:
+ UnwrappedLineFormatter(ContinuationIndenter *Indenter,
+ WhitespaceManager *Whitespaces,
+ const FormatStyle &Style)
+ : Indenter(Indenter), Whitespaces(Whitespaces), Style(Style) {}
+
+ unsigned format(const SmallVectorImpl<AnnotatedLine *> &Lines, bool DryRun,
+ int AdditionalIndent = 0, bool FixBadIndentation = false);
+
+private:
+ /// \brief Formats an \c AnnotatedLine and returns the penalty.
+ ///
+ /// If \p DryRun is \c false, directly applies the changes.
+ unsigned format(const AnnotatedLine &Line, unsigned FirstIndent,
+ bool DryRun);
+
+ /// \brief An edge in the solution space from \c Previous->State to \c State,
+ /// inserting a newline dependent on the \c NewLine.
+ struct StateNode {
+ StateNode(const LineState &State, bool NewLine, StateNode *Previous)
+ : State(State), NewLine(NewLine), Previous(Previous) {}
+ LineState State;
+ bool NewLine;
+ StateNode *Previous;
+ };
+
+  /// \brief A pair of <penalty, count> used to prioritize the BFS.
+ ///
+ /// In case of equal penalties, we want to prefer states that were inserted
+ /// first. During state generation we make sure that we insert states first
+ /// that break the line as late as possible.
+ typedef std::pair<unsigned, unsigned> OrderedPenalty;
+
+ /// \brief An item in the prioritized BFS search queue. The \c StateNode's
+ /// \c State has the given \c OrderedPenalty.
+ typedef std::pair<OrderedPenalty, StateNode *> QueueItem;
+
+ /// \brief The BFS queue type.
+ typedef std::priority_queue<QueueItem, std::vector<QueueItem>,
+ std::greater<QueueItem> > QueueType;
+
+  /// \brief Get the offset of the line relative to the level.
+ ///
+ /// For example, 'public:' labels in classes are offset by 1 or 2
+ /// characters to the left from their level.
+ int getIndentOffset(const FormatToken &RootToken) {
+ if (Style.Language == FormatStyle::LK_Java)
+ return 0;
+ if (RootToken.isAccessSpecifier(false) || RootToken.isObjCAccessSpecifier())
+ return Style.AccessModifierOffset;
+ return 0;
+ }
+
+ /// \brief Add a new line and the required indent before the first Token
+ /// of the \c UnwrappedLine if there was no structural parsing error.
+ void formatFirstToken(FormatToken &RootToken,
+ const AnnotatedLine *PreviousLine, unsigned IndentLevel,
+ unsigned Indent, bool InPPDirective);
+
+ /// \brief Get the indent of \p Level from \p IndentForLevel.
+ ///
+ /// \p IndentForLevel must contain the indent for the level \c l
+ /// at \p IndentForLevel[l], or a value < 0 if the indent for
+ /// that level is unknown.
+ unsigned getIndent(ArrayRef<int> IndentForLevel, unsigned Level);
+
+ void join(AnnotatedLine &A, const AnnotatedLine &B);
+
+ unsigned getColumnLimit(bool InPPDirective) const {
+ // In preprocessor directives reserve two chars for trailing " \"
+ return Style.ColumnLimit - (InPPDirective ? 2 : 0);
+ }
+
+ struct CompareLineStatePointers {
+ bool operator()(LineState *obj1, LineState *obj2) const {
+ return *obj1 < *obj2;
+ }
+ };
+
+ /// \brief Analyze the entire solution space starting from \p InitialState.
+ ///
+ /// This implements a variant of Dijkstra's algorithm on the graph that spans
+ /// the solution space (\c LineStates are the nodes). The algorithm tries to
+ /// find the shortest path (the one with lowest penalty) from \p InitialState
+ /// to a state where all tokens are placed. Returns the penalty.
+ ///
+ /// If \p DryRun is \c false, directly applies the changes.
+ unsigned analyzeSolutionSpace(LineState &InitialState, bool DryRun = false);
+
+ void reconstructPath(LineState &State, StateNode *Current);
+
+ /// \brief Add the following state to the analysis queue \c Queue.
+ ///
+ /// Assume the current state is \p PreviousNode and has been reached with a
+ /// penalty of \p Penalty. Insert a line break if \p NewLine is \c true.
+ void addNextStateToQueue(unsigned Penalty, StateNode *PreviousNode,
+ bool NewLine, unsigned *Count, QueueType *Queue);
+
+ /// \brief If the \p State's next token is an r_brace closing a nested block,
+ /// format the nested block before it.
+ ///
+ /// Returns \c true if all children could be placed successfully and adapts
+ /// \p Penalty as well as \p State. If \p DryRun is false, also directly
+ /// creates changes using \c Whitespaces.
+ ///
+ /// The crucial idea here is that children always get formatted upon
+ /// encountering the closing brace right after the nested block. Now, if we
+ /// are currently trying to keep the "}" on the same line (i.e. \p NewLine is
+ /// \c false), the entire block has to be kept on the same line (which is only
+  /// possible if it fits on the line, only contains a single statement, etc.).
+ ///
+ /// If \p NewLine is true, we format the nested block on separate lines, i.e.
+  /// break after the "{", format all lines with correct indentation and then put
+ /// the closing "}" on yet another new line.
+ ///
+ /// This enables us to keep the simple structure of the
+ /// \c UnwrappedLineFormatter, where we only have two options for each token:
+ /// break or don't break.
+ bool formatChildren(LineState &State, bool NewLine, bool DryRun,
+ unsigned &Penalty);
+
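In practice this is what keeps a short nested block, such as a small lambda body, on one line when the style options and the column limit allow it, and what breaks it across lines otherwise. A hypothetical example (names are illustrative):

    auto OnDone = [] { return 42; };   // child block merged onto one line
    auto OnError = [](int Code) {      // child block formatted on its own lines
      Code *= 2;
      return Code;
    };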
+ ContinuationIndenter *Indenter;
+ WhitespaceManager *Whitespaces;
+ FormatStyle Style;
+
+ llvm::SpecificBumpPtrAllocator<StateNode> Allocator;
+
+ // Cache to store the penalty of formatting a vector of AnnotatedLines
+ // starting from a specific additional offset. Improves performance if there
+ // are many nested blocks.
+ std::map<std::pair<const SmallVectorImpl<AnnotatedLine *> *, unsigned>,
+ unsigned> PenaltyCache;
+};
+} // end namespace format
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEFORMATTER_H
diff --git a/lib/Format/UnwrappedLineParser.cpp b/lib/Format/UnwrappedLineParser.cpp
index 20dd573162cc..ec04af5231be 100644
--- a/lib/Format/UnwrappedLineParser.cpp
+++ b/lib/Format/UnwrappedLineParser.cpp
@@ -122,14 +122,13 @@ class ScopedLineState {
public:
ScopedLineState(UnwrappedLineParser &Parser,
bool SwitchToPreprocessorLines = false)
- : Parser(Parser) {
- OriginalLines = Parser.CurrentLines;
+ : Parser(Parser), OriginalLines(Parser.CurrentLines) {
if (SwitchToPreprocessorLines)
Parser.CurrentLines = &Parser.PreprocessorDirectives;
else if (!Parser.Line->Tokens.empty())
Parser.CurrentLines = &Parser.Line->Tokens.back().Children;
- PreBlockLine = Parser.Line.release();
- Parser.Line.reset(new UnwrappedLine());
+ PreBlockLine = std::move(Parser.Line);
+ Parser.Line = llvm::make_unique<UnwrappedLine>();
Parser.Line->Level = PreBlockLine->Level;
Parser.Line->InPPDirective = PreBlockLine->InPPDirective;
}
@@ -139,7 +138,7 @@ public:
Parser.addUnwrappedLine();
}
assert(Parser.Line->Tokens.empty());
- Parser.Line.reset(PreBlockLine);
+ Parser.Line = std::move(PreBlockLine);
if (Parser.CurrentLines == &Parser.PreprocessorDirectives)
Parser.MustBreakBeforeNextToken = true;
Parser.CurrentLines = OriginalLines;
@@ -148,7 +147,7 @@ public:
private:
UnwrappedLineParser &Parser;
- UnwrappedLine *PreBlockLine;
+ std::unique_ptr<UnwrappedLine> PreBlockLine;
SmallVectorImpl<UnwrappedLine> *OriginalLines;
};
@@ -203,12 +202,13 @@ private:
} // end anonymous namespace
UnwrappedLineParser::UnwrappedLineParser(const FormatStyle &Style,
+ const AdditionalKeywords &Keywords,
ArrayRef<FormatToken *> Tokens,
UnwrappedLineConsumer &Callback)
: Line(new UnwrappedLine), MustBreakBeforeNextToken(false),
CurrentLines(&Lines), StructuralError(false), Style(Style),
- Tokens(nullptr), Callback(Callback), AllTokens(Tokens),
- PPBranchLevel(-1) {}
+ Keywords(Keywords), Tokens(nullptr), Callback(Callback),
+ AllTokens(Tokens), PPBranchLevel(-1) {}
void UnwrappedLineParser::reset() {
PPBranchLevel = -1;
@@ -311,7 +311,6 @@ void UnwrappedLineParser::calculateBraceTypes() {
// parse macros, so this will magically work inside macro
// definitions, too.
unsigned StoredPosition = Tokens->getPosition();
- unsigned Position = StoredPosition;
FormatToken *Tok = FormatTok;
// Keep a stack of positions of lbrace tokens. We will
// update information about whether an lbrace starts a
@@ -354,7 +353,7 @@ void UnwrappedLineParser::calculateBraceTypes() {
ProbablyBracedList =
NextTok->isOneOf(tok::comma, tok::semi, tok::period, tok::colon,
tok::r_paren, tok::r_square, tok::l_brace,
- tok::l_paren) ||
+ tok::l_paren, tok::ellipsis) ||
(NextTok->isBinaryOperator() && !NextIsObjCMethod);
}
if (ProbablyBracedList) {
@@ -382,7 +381,6 @@ void UnwrappedLineParser::calculateBraceTypes() {
break;
}
Tok = NextTok;
- Position += ReadTokens;
} while (Tok->Tok.isNot(tok::eof) && !LBraceStack.empty());
// Assume other blocks for all unclosed opening braces.
for (unsigned i = 0, e = LBraceStack.size(); i != e; ++i) {
@@ -420,6 +418,8 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
}
static bool IsGoogScope(const UnwrappedLine &Line) {
+ // FIXME: Closure-library specific stuff should not be hard-coded but be
+ // configurable.
if (Line.Tokens.size() < 4)
return false;
auto I = Line.Tokens.begin();
@@ -435,6 +435,19 @@ static bool IsGoogScope(const UnwrappedLine &Line) {
return I->Tok->is(tok::l_paren);
}
+static bool ShouldBreakBeforeBrace(const FormatStyle &Style,
+ const FormatToken &InitialToken) {
+ switch (Style.BreakBeforeBraces) {
+ case FormatStyle::BS_Linux:
+ return InitialToken.isOneOf(tok::kw_namespace, tok::kw_class);
+ case FormatStyle::BS_Allman:
+ case FormatStyle::BS_GNU:
+ return true;
+ default:
+ return false;
+ }
+}
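This helper folds the duplicated BreakBeforeBraces checks from parseNamespace() and parseRecord() into one place: with the Linux style, only namespace and class braces move to the next line, while Allman and GNU always break. A sketch of the Linux-style result (names are placeholders):

    namespace util
    {
    class Buffer
    {
      int Size;
    };
    } // namespace util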
+
void UnwrappedLineParser::parseChildBlock() {
FormatTok->BlockKind = BK_Block;
nextToken();
@@ -646,6 +659,20 @@ void UnwrappedLineParser::parseStructuralElement() {
break;
}
break;
+ case tok::kw_asm:
+ nextToken();
+ if (FormatTok->is(tok::l_brace)) {
+ nextToken();
+ while (FormatTok && FormatTok->isNot(tok::eof)) {
+ if (FormatTok->is(tok::r_brace)) {
+ nextToken();
+ break;
+ }
+ FormatTok->Finalized = true;
+ nextToken();
+ }
+ }
+ break;
case tok::kw_namespace:
parseNamespace();
return;
@@ -659,7 +686,10 @@ void UnwrappedLineParser::parseStructuralElement() {
case tok::kw_public:
case tok::kw_protected:
case tok::kw_private:
- parseAccessSpecifier();
+ if (Style.Language == FormatStyle::LK_Java)
+ nextToken();
+ else
+ parseAccessSpecifier();
return;
case tok::kw_if:
parseIfThenElse();
@@ -717,8 +747,8 @@ void UnwrappedLineParser::parseStructuralElement() {
break;
case tok::kw_typedef:
nextToken();
- // FIXME: Use the IdentifierTable instead.
- if (FormatTok->TokenText == "NS_ENUM")
+ if (FormatTok->isOneOf(Keywords.kw_NS_ENUM, Keywords.kw_NS_OPTIONS,
+ Keywords.kw_CF_ENUM, Keywords.kw_CF_OPTIONS))
parseEnum();
break;
case tok::kw_struct:
@@ -728,6 +758,13 @@ void UnwrappedLineParser::parseStructuralElement() {
// A record declaration or definition is always the start of a structural
// element.
break;
+ case tok::period:
+ nextToken();
+ // In Java, classes have an implicit static member "class".
+ if (Style.Language == FormatStyle::LK_Java && FormatTok &&
+ FormatTok->is(tok::kw_class))
+ nextToken();
+ break;
case tok::semi:
nextToken();
addUnwrappedLine();
@@ -783,17 +820,14 @@ void UnwrappedLineParser::parseStructuralElement() {
parseLabel();
return;
}
- // Recognize function-like macro usages without trailing semicolon.
- if (FormatTok->Tok.is(tok::l_paren)) {
+ // Recognize function-like macro usages without trailing semicolon as
+    // well as free-standing macros like Q_OBJECT.
+ bool FunctionLike = FormatTok->is(tok::l_paren);
+ if (FunctionLike)
parseParens();
- if (FormatTok->NewlinesBefore > 0 &&
- tokenCanStartNewLine(FormatTok->Tok) && Text == Text.upper()) {
- addUnwrappedLine();
- return;
- }
- } else if (FormatTok->HasUnescapedNewline && Text.size() >= 5 &&
- Text == Text.upper()) {
- // Recognize free-standing macros like Q_OBJECT.
+ if (FormatTok->NewlinesBefore > 0 &&
+ (Text.size() >= 5 || FunctionLike) &&
+ tokenCanStartNewLine(FormatTok->Tok) && Text == Text.upper()) {
addUnwrappedLine();
return;
}
@@ -820,7 +854,8 @@ bool UnwrappedLineParser::tryToParseLambda() {
// FIXME: This is a dirty way to access the previous token. Find a better
// solution.
if (!Line->Tokens.empty() &&
- (Line->Tokens.back().Tok->isOneOf(tok::identifier, tok::kw_operator) ||
+ (Line->Tokens.back().Tok->isOneOf(tok::identifier, tok::kw_operator,
+ tok::kw_new, tok::kw_delete) ||
Line->Tokens.back().Tok->closesScope() ||
Line->Tokens.back().Tok->isSimpleTypeSpecifier())) {
nextToken();
@@ -842,6 +877,10 @@ bool UnwrappedLineParser::tryToParseLambda() {
case tok::l_paren:
parseParens();
break;
+ case tok::amp:
+ case tok::star:
+ case tok::kw_const:
+ case tok::comma:
case tok::less:
case tok::greater:
case tok::identifier:
@@ -947,7 +986,7 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons) {
// replace this by using parseAssigmentExpression() inside.
do {
if (Style.Language == FormatStyle::LK_JavaScript &&
- FormatTok->TokenText == "function") {
+ FormatTok->is(Keywords.kw_function)) {
tryToParseJSFunction();
continue;
}
@@ -994,6 +1033,8 @@ void UnwrappedLineParser::parseParens() {
switch (FormatTok->Tok.getKind()) {
case tok::l_paren:
parseParens();
+ if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_brace))
+ parseChildBlock();
break;
case tok::r_paren:
nextToken();
@@ -1004,17 +1045,23 @@ void UnwrappedLineParser::parseParens() {
case tok::l_square:
tryToParseLambda();
break;
- case tok::l_brace: {
+ case tok::l_brace:
if (!tryToParseBracedList()) {
parseChildBlock();
}
break;
- }
case tok::at:
nextToken();
if (FormatTok->Tok.is(tok::l_brace))
parseBracedList();
break;
+ case tok::identifier:
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ FormatTok->is(Keywords.kw_function))
+ tryToParseJSFunction();
+ else
+ nextToken();
+ break;
default:
nextToken();
break;
@@ -1080,6 +1127,8 @@ void UnwrappedLineParser::parseIfThenElse() {
--Line->Level;
}
if (FormatTok->Tok.is(tok::kw_else)) {
+ if (Style.BreakBeforeBraces == FormatStyle::BS_Stroustrup)
+ addUnwrappedLine();
nextToken();
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
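The Stroustrup-specific addUnwrappedLine() above puts else on a line of its own after the closing brace, roughly as follows (Cond, Hits, and Misses are placeholders):

    if (Cond) {
      ++Hits;
    }
    else {
      ++Misses;
    }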
@@ -1115,6 +1164,10 @@ void UnwrappedLineParser::parseTryCatch() {
nextToken();
}
}
+ // Parse try with resource.
+ if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_paren)) {
+ parseParens();
+ }
if (FormatTok->is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock(/*MustBeDeclaration=*/false);
@@ -1136,8 +1189,9 @@ void UnwrappedLineParser::parseTryCatch() {
--Line->Level;
}
while (FormatTok->is(tok::kw_catch) ||
- (Style.Language == FormatStyle::LK_JavaScript &&
- FormatTok->TokenText == "finally")) {
+ ((Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript) &&
+ FormatTok->is(Keywords.kw_finally))) {
nextToken();
while (FormatTok->isNot(tok::l_brace)) {
if (FormatTok->is(tok::l_paren)) {
@@ -1166,13 +1220,13 @@ void UnwrappedLineParser::parseTryCatch() {
void UnwrappedLineParser::parseNamespace() {
assert(FormatTok->Tok.is(tok::kw_namespace) && "'namespace' expected");
+
+ const FormatToken &InitialToken = *FormatTok;
nextToken();
if (FormatTok->Tok.is(tok::identifier))
nextToken();
if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BreakBeforeBraces == FormatStyle::BS_Linux ||
- Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
- Style.BreakBeforeBraces == FormatStyle::BS_GNU)
+ if (ShouldBreakBeforeBrace(Style, InitialToken))
addUnwrappedLine();
bool AddLevel = Style.NamespaceIndentation == FormatStyle::NI_All ||
@@ -1294,43 +1348,115 @@ void UnwrappedLineParser::parseAccessSpecifier() {
}
void UnwrappedLineParser::parseEnum() {
- if (FormatTok->Tok.is(tok::kw_enum)) {
- // Won't be 'enum' for NS_ENUMs.
+ // Won't be 'enum' for NS_ENUMs.
+ if (FormatTok->Tok.is(tok::kw_enum))
nextToken();
- }
+
// Eat up enum class ...
if (FormatTok->Tok.is(tok::kw_class) || FormatTok->Tok.is(tok::kw_struct))
nextToken();
while (FormatTok->Tok.getIdentifierInfo() ||
- FormatTok->isOneOf(tok::colon, tok::coloncolon)) {
+ FormatTok->isOneOf(tok::colon, tok::coloncolon, tok::less,
+ tok::greater, tok::comma, tok::question)) {
nextToken();
// We can have macros or attributes in between 'enum' and the enum name.
- if (FormatTok->Tok.is(tok::l_paren)) {
+ if (FormatTok->is(tok::l_paren))
parseParens();
- }
- if (FormatTok->Tok.is(tok::identifier))
+ if (FormatTok->is(tok::identifier))
nextToken();
}
- if (FormatTok->Tok.is(tok::l_brace)) {
- FormatTok->BlockKind = BK_Block;
- bool HasError = !parseBracedList(/*ContinueOnSemicolons=*/true);
- if (HasError) {
- if (FormatTok->is(tok::semi))
- nextToken();
- addUnwrappedLine();
- }
+
+ // Just a declaration or something is wrong.
+ if (FormatTok->isNot(tok::l_brace))
+ return;
+ FormatTok->BlockKind = BK_Block;
+
+ if (Style.Language == FormatStyle::LK_Java) {
+ // Java enums are different.
+ parseJavaEnumBody();
+ return;
}
+
+ // Parse enum body.
+ bool HasError = !parseBracedList(/*ContinueOnSemicolons=*/true);
+ if (HasError) {
+ if (FormatTok->is(tok::semi))
+ nextToken();
+ addUnwrappedLine();
+ }
+
// We fall through to parsing a structural element afterwards, so that in
// enum A {} n, m;
// "} n, m;" will end up in one unwrapped line.
}
+void UnwrappedLineParser::parseJavaEnumBody() {
+ // Determine whether the enum is simple, i.e. does not have a semicolon or
+ // constants with class bodies. Simple enums can be formatted like braced
+ // lists, contracted to a single line, etc.
+ unsigned StoredPosition = Tokens->getPosition();
+ bool IsSimple = true;
+ FormatToken *Tok = Tokens->getNextToken();
+ while (Tok) {
+ if (Tok->is(tok::r_brace))
+ break;
+ if (Tok->isOneOf(tok::l_brace, tok::semi)) {
+ IsSimple = false;
+ break;
+ }
+ // FIXME: This will also mark enums with braces in the arguments to enum
+ // constants as "not simple". This is probably fine in practice, though.
+ Tok = Tokens->getNextToken();
+ }
+ FormatTok = Tokens->setPosition(StoredPosition);
+
+ if (IsSimple) {
+ parseBracedList();
+ addUnwrappedLine();
+ return;
+ }
+
+ // Parse the body of a more complex enum.
+ // First add a line for everything up to the "{".
+ nextToken();
+ addUnwrappedLine();
+ ++Line->Level;
+
+ // Parse the enum constants.
+ while (FormatTok) {
+ if (FormatTok->is(tok::l_brace)) {
+ // Parse the constant's class body.
+ parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/true,
+ /*MunchSemi=*/false);
+ } else if (FormatTok->is(tok::l_paren)) {
+ parseParens();
+ } else if (FormatTok->is(tok::comma)) {
+ nextToken();
+ addUnwrappedLine();
+ } else if (FormatTok->is(tok::semi)) {
+ nextToken();
+ addUnwrappedLine();
+ break;
+ } else if (FormatTok->is(tok::r_brace)) {
+ addUnwrappedLine();
+ break;
+ } else {
+ nextToken();
+ }
+ }
+
+ // Parse the class body after the enum's ";" if any.
+ parseLevel(/*HasOpeningBrace=*/true);
+ nextToken();
+ --Line->Level;
+ addUnwrappedLine();
+}
+
void UnwrappedLineParser::parseRecord() {
+ const FormatToken &InitialToken = *FormatTok;
nextToken();
- if (FormatTok->Tok.is(tok::identifier) ||
- FormatTok->Tok.is(tok::kw___attribute) ||
- FormatTok->Tok.is(tok::kw___declspec) ||
- FormatTok->Tok.is(tok::kw_alignas)) {
+ if (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::kw___attribute,
+ tok::kw___declspec, tok::kw_alignas)) {
nextToken();
// We can have macros or attributes in between 'class' and the class name.
if (FormatTok->Tok.is(tok::l_paren)) {
@@ -1338,9 +1464,10 @@ void UnwrappedLineParser::parseRecord() {
}
// The actual identifier can be a nested name specifier, and in macros
// it is often token-pasted.
- while (FormatTok->Tok.is(tok::identifier) ||
- FormatTok->Tok.is(tok::coloncolon) ||
- FormatTok->Tok.is(tok::hashhash))
+ while (FormatTok->is(tok::identifier) || FormatTok->is(tok::coloncolon) ||
+ FormatTok->is(tok::hashhash) ||
+ (Style.Language == FormatStyle::LK_Java &&
+ FormatTok->isOneOf(tok::period, tok::comma)))
nextToken();
// Note that parsing away template declarations here leads to incorrectly
@@ -1362,9 +1489,7 @@ void UnwrappedLineParser::parseRecord() {
}
}
if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BreakBeforeBraces == FormatStyle::BS_Linux ||
- Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
- Style.BreakBeforeBraces == FormatStyle::BS_GNU)
+ if (ShouldBreakBeforeBrace(Style, InitialToken))
addUnwrappedLine();
parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/true,
@@ -1373,6 +1498,9 @@ void UnwrappedLineParser::parseRecord() {
// We fall through to parsing a structural element afterwards, so
// class A {} n, m;
// will end up in one unwrapped line.
+ // This does not apply for Java.
+ if (Style.Language == FormatStyle::LK_Java)
+ addUnwrappedLine();
}
void UnwrappedLineParser::parseObjCProtocolList() {
diff --git a/lib/Format/UnwrappedLineParser.h b/lib/Format/UnwrappedLineParser.h
index c9182e9d71d3..3218afecad30 100644
--- a/lib/Format/UnwrappedLineParser.h
+++ b/lib/Format/UnwrappedLineParser.h
@@ -13,13 +13,14 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FORMAT_UNWRAPPED_LINE_PARSER_H
-#define LLVM_CLANG_FORMAT_UNWRAPPED_LINE_PARSER_H
+#ifndef LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
+#define LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
#include "FormatToken.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Format/Format.h"
#include <list>
+#include <stack>
namespace clang {
namespace format {
@@ -59,7 +60,9 @@ class FormatTokenSource;
class UnwrappedLineParser {
public:
- UnwrappedLineParser(const FormatStyle &Style, ArrayRef<FormatToken *> Tokens,
+ UnwrappedLineParser(const FormatStyle &Style,
+ const AdditionalKeywords &Keywords,
+ ArrayRef<FormatToken *> Tokens,
UnwrappedLineConsumer &Callback);
/// Returns true in case of a structural error.
@@ -94,6 +97,7 @@ private:
void parseNamespace();
void parseAccessSpecifier();
void parseEnum();
+ void parseJavaEnumBody();
void parseRecord();
void parseObjCProtocolList();
void parseObjCUntilAtEnd();
@@ -157,6 +161,8 @@ private:
bool StructuralError;
const FormatStyle &Style;
+ const AdditionalKeywords &Keywords;
+
FormatTokenSource *Tokens;
UnwrappedLineConsumer &Callback;
@@ -214,4 +220,4 @@ inline UnwrappedLine::UnwrappedLine()
} // end namespace format
} // end namespace clang
-#endif // LLVM_CLANG_FORMAT_UNWRAPPED_LINE_PARSER_H
+#endif
diff --git a/lib/Format/WhitespaceManager.cpp b/lib/Format/WhitespaceManager.cpp
index 47b94de4c6f7..bf1207e59c90 100644
--- a/lib/Format/WhitespaceManager.cpp
+++ b/lib/Format/WhitespaceManager.cpp
@@ -81,7 +81,7 @@ void WhitespaceManager::replaceWhitespaceInToken(
// FIXME: We still need to take this change in account to properly
// calculate the new length of the comment and to calculate the changes
// for which to do the alignment when aligning comments.
- Tok.Type == TT_LineComment && Newlines > 0 ? tok::comment : tok::unknown,
+ Tok.is(TT_LineComment) && Newlines > 0 ? tok::comment : tok::unknown,
InPPDirective && !Tok.IsFirst));
}
@@ -163,15 +163,17 @@ void WhitespaceManager::alignTrailingComments() {
Changes[i - 1].StartOfTokenColumn == 0;
bool WasAlignedWithStartOfNextLine = false;
if (Changes[i].NewlinesBefore == 1) { // A comment on its own line.
+ unsigned CommentColumn = SourceMgr.getSpellingColumnNumber(
+ Changes[i].OriginalWhitespaceRange.getEnd());
for (unsigned j = i + 1; j != e; ++j) {
if (Changes[j].Kind != tok::comment) { // Skip over comments.
+ unsigned NextColumn = SourceMgr.getSpellingColumnNumber(
+ Changes[j].OriginalWhitespaceRange.getEnd());
// The start of the next token was previously aligned with the
// start of this comment.
WasAlignedWithStartOfNextLine =
- (SourceMgr.getSpellingColumnNumber(
- Changes[i].OriginalWhitespaceRange.getEnd()) ==
- SourceMgr.getSpellingColumnNumber(
- Changes[j].OriginalWhitespaceRange.getEnd()));
+ CommentColumn == NextColumn ||
+ CommentColumn == NextColumn + Style.IndentWidth;
break;
}
}
diff --git a/lib/Format/WhitespaceManager.h b/lib/Format/WhitespaceManager.h
index 189b1aefa03f..28730d457eba 100644
--- a/lib/Format/WhitespaceManager.h
+++ b/lib/Format/WhitespaceManager.h
@@ -13,8 +13,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FORMAT_WHITESPACEMANAGER_H
-#define LLVM_CLANG_FORMAT_WHITESPACEMANAGER_H
+#ifndef LLVM_CLANG_LIB_FORMAT_WHITESPACEMANAGER_H
+#define LLVM_CLANG_LIB_FORMAT_WHITESPACEMANAGER_H
#include "TokenAnnotator.h"
#include "clang/Basic/SourceManager.h"
@@ -200,4 +200,4 @@ private:
} // namespace format
} // namespace clang
-#endif // LLVM_CLANG_FORMAT_WHITESPACEMANAGER_H
+#endif
diff --git a/lib/Frontend/ASTConsumers.cpp b/lib/Frontend/ASTConsumers.cpp
index 54a6d474c18a..f53c614b0a30 100644
--- a/lib/Frontend/ASTConsumers.cpp
+++ b/lib/Frontend/ASTConsumers.cpp
@@ -57,7 +57,8 @@ namespace {
bool ShowColors = Out.has_colors();
if (ShowColors)
Out.changeColor(raw_ostream::BLUE);
- Out << (Dump ? "Dumping " : "Printing ") << getName(D) << ":\n";
+ Out << ((Dump || DumpLookups) ? "Dumping " : "Printing ") << getName(D)
+ << ":\n";
if (ShowColors)
Out.resetColor();
print(D);
@@ -79,9 +80,13 @@ namespace {
}
void print(Decl *D) {
if (DumpLookups) {
- if (DeclContext *DC = dyn_cast<DeclContext>(D))
- DC->dumpLookups(Out);
- else
+ if (DeclContext *DC = dyn_cast<DeclContext>(D)) {
+ if (DC == DC->getPrimaryContext())
+ DC->dumpLookups(Out, Dump);
+ else
+ Out << "Lookup map is in primary DeclContext "
+ << DC->getPrimaryContext() << "\n";
+ } else
Out << "Not a DeclContext\n";
} else if (Dump)
D->dump(Out);
@@ -118,17 +123,21 @@ namespace {
};
} // end anonymous namespace
-ASTConsumer *clang::CreateASTPrinter(raw_ostream *Out,
- StringRef FilterString) {
- return new ASTPrinter(Out, /*Dump=*/ false, FilterString);
+std::unique_ptr<ASTConsumer> clang::CreateASTPrinter(raw_ostream *Out,
+ StringRef FilterString) {
+ return llvm::make_unique<ASTPrinter>(Out, /*Dump=*/false, FilterString);
}
-ASTConsumer *clang::CreateASTDumper(StringRef FilterString, bool DumpLookups) {
- return new ASTPrinter(nullptr, /*Dump=*/true, FilterString, DumpLookups);
+std::unique_ptr<ASTConsumer> clang::CreateASTDumper(StringRef FilterString,
+ bool DumpDecls,
+ bool DumpLookups) {
+ assert((DumpDecls || DumpLookups) && "nothing to dump");
+ return llvm::make_unique<ASTPrinter>(nullptr, DumpDecls, FilterString,
+ DumpLookups);
}
-ASTConsumer *clang::CreateASTDeclNodeLister() {
- return new ASTDeclNodeLister(nullptr);
+std::unique_ptr<ASTConsumer> clang::CreateASTDeclNodeLister() {
+ return llvm::make_unique<ASTDeclNodeLister>(nullptr);
}
//===----------------------------------------------------------------------===//
@@ -164,8 +173,9 @@ void ASTViewer::HandleTopLevelSingleDecl(Decl *D) {
}
}
-
-ASTConsumer *clang::CreateASTViewer() { return new ASTViewer(); }
+std::unique_ptr<ASTConsumer> clang::CreateASTViewer() {
+ return llvm::make_unique<ASTViewer>();
+}
//===----------------------------------------------------------------------===//
/// DeclContextPrinter - Decl and DeclContext Visualization
@@ -475,6 +485,6 @@ void DeclContextPrinter::PrintDeclContext(const DeclContext* DC,
}
}
}
-ASTConsumer *clang::CreateDeclContextPrinter() {
- return new DeclContextPrinter();
+std::unique_ptr<ASTConsumer> clang::CreateDeclContextPrinter() {
+ return llvm::make_unique<DeclContextPrinter>();
}
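
The ASTConsumers.cpp hunks above convert the consumer factories from returning raw ASTConsumer pointers to returning std::unique_ptr built with llvm::make_unique (LLVM's pre-C++14 stand-in for std::make_unique). A minimal standalone sketch of that ownership-transfer pattern, using only the standard library; Consumer and Printer are made-up stand-ins, not clang's classes:

#include <iostream>
#include <memory>
#include <string>

// Illustrative stand-ins for ASTConsumer/ASTPrinter; not clang types.
struct Consumer {
  virtual ~Consumer() = default;
  virtual void HandleTranslationUnit() = 0;
};

struct Printer : Consumer {
  explicit Printer(std::string Filter) : Filter(std::move(Filter)) {}
  void HandleTranslationUnit() override {
    std::cout << "printing decls matching '" << Filter << "'\n";
  }
  std::string Filter;
};

// Before: Consumer *createPrinter(...) { return new Printer(...); }
// After: the factory hands back an owning std::unique_ptr, so ownership is
// visible in the signature and callers never need a manual delete.
std::unique_ptr<Consumer> createPrinter(const std::string &Filter) {
  return std::make_unique<Printer>(Filter);   // llvm::make_unique in-tree
}

int main() {
  auto C = createPrinter("main");
  C->HandleTranslationUnit();                 // destroyed automatically
}
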
diff --git a/lib/Frontend/ASTMerge.cpp b/lib/Frontend/ASTMerge.cpp
index ff6434c56945..216ac6a16984 100644
--- a/lib/Frontend/ASTMerge.cpp
+++ b/lib/Frontend/ASTMerge.cpp
@@ -16,8 +16,8 @@
using namespace clang;
-ASTConsumer *ASTMergeAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+ASTMergeAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
return AdaptedAction->CreateASTConsumer(CI, InFile);
}
@@ -45,8 +45,8 @@ void ASTMergeAction::ExecuteAction() {
new ForwardingDiagnosticConsumer(
*CI.getDiagnostics().getClient()),
/*ShouldOwnClient=*/true));
- ASTUnit *Unit = ASTUnit::LoadFromASTFile(ASTFiles[I], Diags,
- CI.getFileSystemOpts(), false);
+ std::unique_ptr<ASTUnit> Unit = ASTUnit::LoadFromASTFile(
+ ASTFiles[I], Diags, CI.getFileSystemOpts(), false);
if (!Unit)
continue;
@@ -66,8 +66,6 @@ void ASTMergeAction::ExecuteAction() {
Importer.Import(D);
}
-
- delete Unit;
}
AdaptedAction->ExecuteAction();
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index fc44d9f1b4c0..a3998fa351de 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -105,7 +105,8 @@ static llvm::sys::SmartMutex<false> &getOnDiskMutex() {
static void cleanupOnDiskMapAtExit();
-typedef llvm::DenseMap<const ASTUnit *, OnDiskData *> OnDiskDataMap;
+typedef llvm::DenseMap<const ASTUnit *,
+ std::unique_ptr<OnDiskData>> OnDiskDataMap;
static OnDiskDataMap &getOnDiskDataMap() {
static OnDiskDataMap M;
static bool hasRegisteredAtExit = false;
@@ -132,9 +133,9 @@ static OnDiskData &getOnDiskData(const ASTUnit *AU) {
// DenseMap.
llvm::MutexGuard Guard(getOnDiskMutex());
OnDiskDataMap &M = getOnDiskDataMap();
- OnDiskData *&D = M[AU];
+ auto &D = M[AU];
if (!D)
- D = new OnDiskData();
+ D = llvm::make_unique<OnDiskData>();
return *D;
}
@@ -150,7 +151,6 @@ static void removeOnDiskEntry(const ASTUnit *AU) {
OnDiskDataMap::iterator I = M.find(AU);
if (I != M.end()) {
I->second->Cleanup();
- delete I->second;
M.erase(AU);
}
}
@@ -219,8 +219,8 @@ ASTUnit::ASTUnit(bool _MainFileIsAST)
TUKind(TU_Complete), WantTiming(getenv("LIBCLANG_TIMING")),
OwnsRemappedFileBuffers(true),
NumStoredDiagnosticsFromDriver(0),
- PreambleRebuildCounter(0), SavedMainFileBuffer(nullptr),
- PreambleBuffer(nullptr), NumWarningsInPreamble(0),
+ PreambleRebuildCounter(0),
+ NumWarningsInPreamble(0),
ShouldCacheCodeCompletionResults(false),
IncludeBriefCommentsInCodeCompletion(false), UserFilesAreVolatile(false),
CompletionCacheTopLevelHashValue(0),
@@ -251,9 +251,6 @@ ASTUnit::~ASTUnit() {
for (const auto &RB : PPOpts.RemappedFileBuffers)
delete RB.second;
}
-
- delete SavedMainFileBuffer;
- delete PreambleBuffer;
ClearCachedCompletionResults();
@@ -511,8 +508,8 @@ public:
: PP(PP), Context(Context), LangOpt(LangOpt), TargetOpts(TargetOpts),
Target(Target), Counter(Counter), InitializedLanguage(false) {}
- bool ReadLanguageOptions(const LangOptions &LangOpts,
- bool Complain) override {
+ bool ReadLanguageOptions(const LangOptions &LangOpts, bool Complain,
+ bool AllowCompatibleDifferences) override {
if (InitializedLanguage)
return false;
@@ -592,6 +589,7 @@ class CaptureDroppedDiagnostics {
DiagnosticsEngine &Diags;
StoredDiagnosticConsumer Client;
DiagnosticConsumer *PreviousClient;
+ std::unique_ptr<DiagnosticConsumer> OwningPreviousClient;
public:
CaptureDroppedDiagnostics(bool RequestCapture, DiagnosticsEngine &Diags,
@@ -599,16 +597,15 @@ public:
: Diags(Diags), Client(StoredDiags), PreviousClient(nullptr)
{
if (RequestCapture || Diags.getClient() == nullptr) {
- PreviousClient = Diags.takeClient();
- Diags.setClient(&Client);
+ OwningPreviousClient = Diags.takeClient();
+ PreviousClient = Diags.getClient();
+ Diags.setClient(&Client, false);
}
}
~CaptureDroppedDiagnostics() {
- if (Diags.getClient() == &Client) {
- Diags.takeClient();
- Diags.setClient(PreviousClient);
- }
+ if (Diags.getClient() == &Client)
+ Diags.setClient(PreviousClient, !!OwningPreviousClient.release());
}
};
@@ -638,38 +635,30 @@ ASTDeserializationListener *ASTUnit::getDeserializationListener() {
return nullptr;
}
-llvm::MemoryBuffer *ASTUnit::getBufferForFile(StringRef Filename,
- std::string *ErrorStr) {
+std::unique_ptr<llvm::MemoryBuffer>
+ASTUnit::getBufferForFile(StringRef Filename, std::string *ErrorStr) {
assert(FileMgr);
- return FileMgr->getBufferForFile(Filename, ErrorStr);
+ auto Buffer = FileMgr->getBufferForFile(Filename);
+ if (Buffer)
+ return std::move(*Buffer);
+ if (ErrorStr)
+ *ErrorStr = Buffer.getError().message();
+ return nullptr;
}
/// \brief Configure the diagnostics object for use with ASTUnit.
-void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> &Diags,
- const char **ArgBegin, const char **ArgEnd,
+void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
ASTUnit &AST, bool CaptureDiagnostics) {
- if (!Diags.get()) {
- // No diagnostics engine was provided, so create our own diagnostics object
- // with the default options.
- DiagnosticConsumer *Client = nullptr;
- if (CaptureDiagnostics)
- Client = new StoredDiagnosticConsumer(AST.StoredDiagnostics);
- Diags = CompilerInstance::createDiagnostics(new DiagnosticOptions(),
- Client,
- /*ShouldOwnClient=*/true);
- } else if (CaptureDiagnostics) {
+ assert(Diags.get() && "no DiagnosticsEngine was provided");
+ if (CaptureDiagnostics)
Diags->setClient(new StoredDiagnosticConsumer(AST.StoredDiagnostics));
- }
}
-ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- const FileSystemOptions &FileSystemOpts,
- bool OnlyLocalDecls,
- ArrayRef<RemappedFile> RemappedFiles,
- bool CaptureDiagnostics,
- bool AllowPCHWithCompilerErrors,
- bool UserFilesAreVolatile) {
+std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
+ const std::string &Filename, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ const FileSystemOptions &FileSystemOpts, bool OnlyLocalDecls,
+ ArrayRef<RemappedFile> RemappedFiles, bool CaptureDiagnostics,
+ bool AllowPCHWithCompilerErrors, bool UserFilesAreVolatile) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(true));
// Recover resources if we crash before exiting this method.
@@ -679,7 +668,7 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
DiagCleanup(Diags.get());
- ConfigureDiags(Diags, nullptr, nullptr, *AST, CaptureDiagnostics);
+ ConfigureDiags(Diags, *AST, CaptureDiagnostics);
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
@@ -705,7 +694,7 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
// Gather Info for preprocessor construction later on.
- HeaderSearch &HeaderInfo = *AST->HeaderInfo.get();
+ HeaderSearch &HeaderInfo = *AST->HeaderInfo;
unsigned Counter;
AST->PP =
@@ -728,10 +717,9 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
/*DisableValidation=*/disableValid,
AllowPCHWithCompilerErrors);
- AST->Reader->setListener(new ASTInfoCollector(*AST->PP, Context,
- AST->ASTFileLangOpts,
- AST->TargetOpts, AST->Target,
- Counter));
+ AST->Reader->setListener(llvm::make_unique<ASTInfoCollector>(
+ *AST->PP, Context, AST->ASTFileLangOpts, AST->TargetOpts, AST->Target,
+ Counter));
switch (AST->Reader->ReadAST(Filename, serialization::MK_MainFile,
SourceLocation(), ASTReader::ARR_None)) {
@@ -768,7 +756,7 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
// Tell the diagnostic client that we have started a source file.
AST->getDiagnostics().getClient()->BeginSourceFile(Context.getLangOpts(),&PP);
- return AST.release();
+ return AST;
}
namespace {
@@ -891,12 +879,13 @@ class TopLevelDeclTrackerAction : public ASTFrontendAction {
public:
ASTUnit &Unit;
- ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) override {
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override {
CI.getPreprocessor().addPPCallbacks(
- new MacroDefinitionTrackerPPCallbacks(Unit.getCurrentTopLevelHashValue()));
- return new TopLevelDeclTrackerConsumer(Unit,
- Unit.getCurrentTopLevelHashValue());
+ llvm::make_unique<MacroDefinitionTrackerPPCallbacks>(
+ Unit.getCurrentTopLevelHashValue()));
+ return llvm::make_unique<TopLevelDeclTrackerConsumer>(
+ Unit, Unit.getCurrentTopLevelHashValue());
}
public:
@@ -916,8 +905,8 @@ public:
explicit PrecompilePreambleAction(ASTUnit &Unit)
: Unit(Unit), HasEmittedPreamblePCH(false) {}
- ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) override;
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override;
bool hasEmittedPreamblePCH() const { return HasEmittedPreamblePCH; }
void setHasEmittedPreamblePCH() { HasEmittedPreamblePCH = true; }
bool shouldEraseOutputFiles() override { return !hasEmittedPreamblePCH(); }
@@ -979,8 +968,9 @@ public:
}
-ASTConsumer *PrecompilePreambleAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+PrecompilePreambleAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
std::string Sysroot;
std::string OutputFile;
raw_ostream *OS = nullptr;
@@ -991,10 +981,11 @@ ASTConsumer *PrecompilePreambleAction::CreateASTConsumer(CompilerInstance &CI,
if (!CI.getFrontendOpts().RelocatablePCH)
Sysroot.clear();
- CI.getPreprocessor().addPPCallbacks(new MacroDefinitionTrackerPPCallbacks(
- Unit.getCurrentTopLevelHashValue()));
- return new PrecompilePreambleConsumer(Unit, this, CI.getPreprocessor(),
- Sysroot, OS);
+ CI.getPreprocessor().addPPCallbacks(
+ llvm::make_unique<MacroDefinitionTrackerPPCallbacks>(
+ Unit.getCurrentTopLevelHashValue()));
+ return llvm::make_unique<PrecompilePreambleConsumer>(
+ Unit, this, CI.getPreprocessor(), Sysroot, OS);
}
static bool isNonDriverDiag(const StoredDiagnostic &StoredDiag) {
@@ -1031,15 +1022,12 @@ static void checkAndSanitizeDiags(SmallVectorImpl<StoredDiagnostic> &
///
/// \returns True if a failure occurred that causes the ASTUnit not to
/// contain any translation-unit information, false otherwise.
-bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
- delete SavedMainFileBuffer;
- SavedMainFileBuffer = nullptr;
+bool ASTUnit::Parse(std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer) {
+ SavedMainFileBuffer.reset();
- if (!Invocation) {
- delete OverrideMainBuffer;
+ if (!Invocation)
return true;
- }
-
+
// Create the compiler instance to use for building the AST.
std::unique_ptr<CompilerInstance> Clang(new CompilerInstance());
@@ -1060,10 +1048,8 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
// Create the target instance.
Clang->setTarget(TargetInfo::CreateTargetInfo(
Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
- if (!Clang->hasTarget()) {
- delete OverrideMainBuffer;
+ if (!Clang->hasTarget())
return true;
- }
// Inform the target of the language options.
//
@@ -1083,10 +1069,8 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
FileSystemOpts = Clang->getFileSystemOpts();
IntrusiveRefCntPtr<vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(Clang->getInvocation(), getDiagnostics());
- if (!VFS) {
- delete OverrideMainBuffer;
+ if (!VFS)
return true;
- }
FileMgr = new FileManager(FileSystemOpts, VFS);
SourceMgr = new SourceManager(getDiagnostics(), *FileMgr,
UserFilesAreVolatile);
@@ -1115,7 +1099,8 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
// make that override happen and introduce the preamble.
PreprocessorOptions &PreprocessorOpts = Clang->getPreprocessorOpts();
if (OverrideMainBuffer) {
- PreprocessorOpts.addRemappedFile(OriginalSourceFile, OverrideMainBuffer);
+ PreprocessorOpts.addRemappedFile(OriginalSourceFile,
+ OverrideMainBuffer.get());
PreprocessorOpts.PrecompiledPreambleBytes.first = Preamble.size();
PreprocessorOpts.PrecompiledPreambleBytes.second
= PreambleEndsAtStartOfLine;
@@ -1130,7 +1115,7 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
checkAndSanitizeDiags(StoredDiagnostics, getSourceManager());
// Keep track of the override buffer.
- SavedMainFileBuffer = OverrideMainBuffer;
+ SavedMainFileBuffer = std::move(OverrideMainBuffer);
}
std::unique_ptr<TopLevelDeclTrackerAction> Act(
@@ -1143,7 +1128,7 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0]))
goto error;
- if (OverrideMainBuffer) {
+ if (SavedMainFileBuffer) {
std::string ModName = getPreambleFile(this);
TranslateStoredDiagnostics(getFileManager(), getSourceManager(),
PreambleDiagnostics, StoredDiagnostics);
@@ -1162,10 +1147,7 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
error:
// Remove the overridden buffer we used for the preamble.
- if (OverrideMainBuffer) {
- delete OverrideMainBuffer;
- SavedMainFileBuffer = nullptr;
- }
+ SavedMainFileBuffer = nullptr;
// Keep the ownership of the data in the ASTUnit because the client may
// want to see the diagnostics.
@@ -1194,17 +1176,16 @@ static std::string GetPreamblePCHPath() {
/// \brief Compute the preamble for the main file, providing the source buffer
/// that corresponds to the main file along with a pair (bytes, start-of-line)
/// that describes the preamble.
-std::pair<llvm::MemoryBuffer *, std::pair<unsigned, bool> >
-ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
- unsigned MaxLines, bool &CreatedBuffer) {
+ASTUnit::ComputedPreamble
+ASTUnit::ComputePreamble(CompilerInvocation &Invocation, unsigned MaxLines) {
FrontendOptions &FrontendOpts = Invocation.getFrontendOpts();
PreprocessorOptions &PreprocessorOpts = Invocation.getPreprocessorOpts();
- CreatedBuffer = false;
// Try to determine if the main file has been remapped, either from the
// command line (to another file) or directly through the compiler invocation
// (to a memory buffer).
llvm::MemoryBuffer *Buffer = nullptr;
+ std::unique_ptr<llvm::MemoryBuffer> BufferOwner;
std::string MainFilePath(FrontendOpts.Inputs[0].getFile());
llvm::sys::fs::UniqueID MainFileID;
if (!llvm::sys::fs::getUniqueID(MainFilePath, MainFileID)) {
@@ -1215,15 +1196,9 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
if (!llvm::sys::fs::getUniqueID(MPath, MID)) {
if (MainFileID == MID) {
// We found a remapping. Try to load the resulting, remapped source.
- if (CreatedBuffer) {
- delete Buffer;
- CreatedBuffer = false;
- }
-
- Buffer = getBufferForFile(RF.second);
- if (!Buffer)
- return std::make_pair(nullptr, std::make_pair(0, true));
- CreatedBuffer = true;
+ BufferOwner = getBufferForFile(RF.second);
+ if (!BufferOwner)
+ return ComputedPreamble(nullptr, nullptr, 0, true);
}
}
}
@@ -1236,11 +1211,7 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
if (!llvm::sys::fs::getUniqueID(MPath, MID)) {
if (MainFileID == MID) {
// We found a remapping.
- if (CreatedBuffer) {
- delete Buffer;
- CreatedBuffer = false;
- }
-
+ BufferOwner.reset();
Buffer = const_cast<llvm::MemoryBuffer *>(RB.second);
}
}
@@ -1248,17 +1219,18 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
}
// If the main source file was not remapped, load it now.
- if (!Buffer) {
- Buffer = getBufferForFile(FrontendOpts.Inputs[0].getFile());
- if (!Buffer)
- return std::make_pair(nullptr, std::make_pair(0, true));
-
- CreatedBuffer = true;
+ if (!Buffer && !BufferOwner) {
+ BufferOwner = getBufferForFile(FrontendOpts.Inputs[0].getFile());
+ if (!BufferOwner)
+ return ComputedPreamble(nullptr, nullptr, 0, true);
}
-
- return std::make_pair(Buffer, Lexer::ComputePreamble(Buffer,
- *Invocation.getLangOpts(),
- MaxLines));
+
+ if (!Buffer)
+ Buffer = BufferOwner.get();
+ auto Pre = Lexer::ComputePreamble(Buffer->getBuffer(),
+ *Invocation.getLangOpts(), MaxLines);
+ return ComputedPreamble(Buffer, std::move(BufferOwner), Pre.first,
+ Pre.second);
}
ASTUnit::PreambleFileHash
@@ -1300,42 +1272,44 @@ makeStandaloneRange(CharSourceRange Range, const SourceManager &SM,
return std::make_pair(Offset, EndOffset);
}
-static void makeStandaloneFixIt(const SourceManager &SM,
- const LangOptions &LangOpts,
- const FixItHint &InFix,
- ASTUnit::StandaloneFixIt &OutFix) {
+static ASTUnit::StandaloneFixIt makeStandaloneFixIt(const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const FixItHint &InFix) {
+ ASTUnit::StandaloneFixIt OutFix;
OutFix.RemoveRange = makeStandaloneRange(InFix.RemoveRange, SM, LangOpts);
OutFix.InsertFromRange = makeStandaloneRange(InFix.InsertFromRange, SM,
LangOpts);
OutFix.CodeToInsert = InFix.CodeToInsert;
OutFix.BeforePreviousInsertions = InFix.BeforePreviousInsertions;
+ return OutFix;
}
-static void makeStandaloneDiagnostic(const LangOptions &LangOpts,
- const StoredDiagnostic &InDiag,
- ASTUnit::StandaloneDiagnostic &OutDiag) {
+static ASTUnit::StandaloneDiagnostic
+makeStandaloneDiagnostic(const LangOptions &LangOpts,
+ const StoredDiagnostic &InDiag) {
+ ASTUnit::StandaloneDiagnostic OutDiag;
OutDiag.ID = InDiag.getID();
OutDiag.Level = InDiag.getLevel();
OutDiag.Message = InDiag.getMessage();
OutDiag.LocOffset = 0;
if (InDiag.getLocation().isInvalid())
- return;
+ return OutDiag;
const SourceManager &SM = InDiag.getLocation().getManager();
SourceLocation FileLoc = SM.getFileLoc(InDiag.getLocation());
OutDiag.Filename = SM.getFilename(FileLoc);
if (OutDiag.Filename.empty())
- return;
+ return OutDiag;
OutDiag.LocOffset = SM.getFileOffset(FileLoc);
for (StoredDiagnostic::range_iterator
I = InDiag.range_begin(), E = InDiag.range_end(); I != E; ++I) {
OutDiag.Ranges.push_back(makeStandaloneRange(*I, SM, LangOpts));
}
- for (StoredDiagnostic::fixit_iterator
- I = InDiag.fixit_begin(), E = InDiag.fixit_end(); I != E; ++I) {
- ASTUnit::StandaloneFixIt Fix;
- makeStandaloneFixIt(SM, LangOpts, *I, Fix);
- OutDiag.FixIts.push_back(Fix);
- }
+ for (StoredDiagnostic::fixit_iterator I = InDiag.fixit_begin(),
+ E = InDiag.fixit_end();
+ I != E; ++I)
+ OutDiag.FixIts.push_back(makeStandaloneFixIt(SM, LangOpts, *I));
+
+ return OutDiag;
}
/// \brief Attempt to build or re-use a precompiled preamble when (re-)parsing
@@ -1358,27 +1332,20 @@ static void makeStandaloneDiagnostic(const LangOptions &LangOpts,
/// \returns If the precompiled preamble can be used, returns a newly-allocated
/// buffer that should be used in place of the main file when doing so.
/// Otherwise, returns a NULL pointer.
-llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
- const CompilerInvocation &PreambleInvocationIn,
- bool AllowRebuild,
- unsigned MaxLines) {
-
+std::unique_ptr<llvm::MemoryBuffer>
+ASTUnit::getMainBufferWithPrecompiledPreamble(
+ const CompilerInvocation &PreambleInvocationIn, bool AllowRebuild,
+ unsigned MaxLines) {
+
IntrusiveRefCntPtr<CompilerInvocation>
PreambleInvocation(new CompilerInvocation(PreambleInvocationIn));
FrontendOptions &FrontendOpts = PreambleInvocation->getFrontendOpts();
PreprocessorOptions &PreprocessorOpts
= PreambleInvocation->getPreprocessorOpts();
- bool CreatedPreambleBuffer = false;
- std::pair<llvm::MemoryBuffer *, std::pair<unsigned, bool> > NewPreamble
- = ComputePreamble(*PreambleInvocation, MaxLines, CreatedPreambleBuffer);
-
- // If ComputePreamble() Take ownership of the preamble buffer.
- std::unique_ptr<llvm::MemoryBuffer> OwnedPreambleBuffer;
- if (CreatedPreambleBuffer)
- OwnedPreambleBuffer.reset(NewPreamble.first);
+ ComputedPreamble NewPreamble = ComputePreamble(*PreambleInvocation, MaxLines);
- if (!NewPreamble.second.first) {
+ if (!NewPreamble.Size) {
// We couldn't find a preamble in the main source. Clear out the current
// preamble, if we have one. It's obviously no good any more.
Preamble.clear();
@@ -1394,10 +1361,10 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// preamble now that we did before, and that there's enough space in
// the main-file buffer within the precompiled preamble to fit the
// new main file.
- if (Preamble.size() == NewPreamble.second.first &&
- PreambleEndsAtStartOfLine == NewPreamble.second.second &&
- memcmp(Preamble.getBufferStart(), NewPreamble.first->getBufferStart(),
- NewPreamble.second.first) == 0) {
+ if (Preamble.size() == NewPreamble.Size &&
+ PreambleEndsAtStartOfLine == NewPreamble.PreambleEndsAtStartOfLine &&
+ memcmp(Preamble.getBufferStart(), NewPreamble.Buffer->getBufferStart(),
+ NewPreamble.Size) == 0) {
// The preamble has not changed. We may be able to re-use the precompiled
// preamble.
@@ -1467,7 +1434,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
getDiagnostics().setNumWarnings(NumWarningsInPreamble);
return llvm::MemoryBuffer::getMemBufferCopy(
- NewPreamble.first->getBuffer(), FrontendOpts.Inputs[0].getFile());
+ NewPreamble.Buffer->getBuffer(), FrontendOpts.Inputs[0].getFile());
}
}
@@ -1512,19 +1479,16 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// subsequent reparses.
StringRef MainFilename = FrontendOpts.Inputs[0].getFile();
Preamble.assign(FileMgr->getFile(MainFilename),
- NewPreamble.first->getBufferStart(),
- NewPreamble.first->getBufferStart()
- + NewPreamble.second.first);
- PreambleEndsAtStartOfLine = NewPreamble.second.second;
+ NewPreamble.Buffer->getBufferStart(),
+ NewPreamble.Buffer->getBufferStart() + NewPreamble.Size);
+ PreambleEndsAtStartOfLine = NewPreamble.PreambleEndsAtStartOfLine;
- delete PreambleBuffer;
- PreambleBuffer
- = llvm::MemoryBuffer::getMemBufferCopy(
- NewPreamble.first->getBuffer().slice(0, Preamble.size()), MainFilename);
+ PreambleBuffer = llvm::MemoryBuffer::getMemBufferCopy(
+ NewPreamble.Buffer->getBuffer().slice(0, Preamble.size()), MainFilename);
// Remap the main source file to the preamble buffer.
StringRef MainFilePath = FrontendOpts.Inputs[0].getFile();
- PreprocessorOpts.addRemappedFile(MainFilePath, PreambleBuffer);
+ PreprocessorOpts.addRemappedFile(MainFilePath, PreambleBuffer.get());
// Tell the compiler invocation to generate a temporary precompiled header.
FrontendOpts.ProgramAction = frontend::GeneratePCH;
@@ -1607,13 +1571,11 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// Transfer any diagnostics generated when parsing the preamble into the set
// of preamble diagnostics.
- for (stored_diag_iterator
- I = stored_diag_afterDriver_begin(),
- E = stored_diag_end(); I != E; ++I) {
- StandaloneDiagnostic Diag;
- makeStandaloneDiagnostic(Clang->getLangOpts(), *I, Diag);
- PreambleDiagnostics.push_back(Diag);
- }
+ for (stored_diag_iterator I = stored_diag_afterDriver_begin(),
+ E = stored_diag_end();
+ I != E; ++I)
+ PreambleDiagnostics.push_back(
+ makeStandaloneDiagnostic(Clang->getLangOpts(), *I));
Act->EndSourceFile();
@@ -1663,8 +1625,8 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
CompletionCacheTopLevelHashValue = 0;
PreambleTopLevelHashValue = CurrentTopLevelHashValue;
}
-
- return llvm::MemoryBuffer::getMemBufferCopy(NewPreamble.first->getBuffer(),
+
+ return llvm::MemoryBuffer::getMemBufferCopy(NewPreamble.Buffer->getBuffer(),
MainFilename);
}
@@ -1688,8 +1650,8 @@ void ASTUnit::transferASTDataFromCompilerInstance(CompilerInstance &CI) {
// created.
assert(CI.hasInvocation() && "missing invocation");
LangOpts = CI.getInvocation().LangOpts;
- TheSema.reset(CI.takeSema());
- Consumer.reset(CI.takeASTConsumer());
+ TheSema = CI.takeSema();
+ Consumer = CI.takeASTConsumer();
if (CI.hasASTContext())
Ctx = &CI.getASTContext();
if (CI.hasPreprocessor())
@@ -1735,7 +1697,7 @@ ASTUnit *ASTUnit::create(CompilerInvocation *CI,
bool UserFilesAreVolatile) {
std::unique_ptr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
- ConfigureDiags(Diags, nullptr, nullptr, *AST, CaptureDiagnostics);
+ ConfigureDiags(Diags, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
AST->Invocation = CI;
AST->FileSystemOpts = CI->getFileSystemOpts();
@@ -1862,13 +1824,15 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
if (Persistent && !TrackerAct) {
Clang->getPreprocessor().addPPCallbacks(
- new MacroDefinitionTrackerPPCallbacks(AST->getCurrentTopLevelHashValue()));
- std::vector<ASTConsumer*> Consumers;
+ llvm::make_unique<MacroDefinitionTrackerPPCallbacks>(
+ AST->getCurrentTopLevelHashValue()));
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
if (Clang->hasASTConsumer())
Consumers.push_back(Clang->takeASTConsumer());
- Consumers.push_back(new TopLevelDeclTrackerConsumer(*AST,
- AST->getCurrentTopLevelHashValue()));
- Clang->setASTConsumer(new MultiplexConsumer(Consumers));
+ Consumers.push_back(llvm::make_unique<TopLevelDeclTrackerConsumer>(
+ *AST, AST->getCurrentTopLevelHashValue()));
+ Clang->setASTConsumer(
+ llvm::make_unique<MultiplexConsumer>(std::move(Consumers)));
}
if (!Act->Execute()) {
AST->transferASTDataFromCompilerInstance(*Clang);
@@ -1898,11 +1862,10 @@ bool ASTUnit::LoadFromCompilerInvocation(bool PrecompilePreamble) {
Invocation->getFrontendOpts().DisableFree = false;
ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
- llvm::MemoryBuffer *OverrideMainBuffer = nullptr;
+ std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
if (PrecompilePreamble) {
PreambleRebuildCounter = 2;
- OverrideMainBuffer
- = getMainBufferWithPrecompiledPreamble(*Invocation);
+ OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(*Invocation);
}
SimpleTimer ParsingTimer(WantTiming);
@@ -1910,9 +1873,9 @@ bool ASTUnit::LoadFromCompilerInvocation(bool PrecompilePreamble) {
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<llvm::MemoryBuffer>
- MemBufferCleanup(OverrideMainBuffer);
-
- return Parse(OverrideMainBuffer);
+ MemBufferCleanup(OverrideMainBuffer.get());
+
+ return Parse(std::move(OverrideMainBuffer));
}
std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
@@ -1922,7 +1885,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
bool IncludeBriefCommentsInCodeCompletion, bool UserFilesAreVolatile) {
// Create the AST unit.
std::unique_ptr<ASTUnit> AST(new ASTUnit(false));
- ConfigureDiags(Diags, nullptr, nullptr, *AST, CaptureDiagnostics);
+ ConfigureDiags(Diags, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
@@ -1961,11 +1924,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
bool AllowPCHWithCompilerErrors, bool SkipFunctionBodies,
bool UserFilesAreVolatile, bool ForSerialization,
std::unique_ptr<ASTUnit> *ErrAST) {
- if (!Diags.get()) {
- // No diagnostics engine was provided, so create our own diagnostics object
- // with the default options.
- Diags = CompilerInstance::createDiagnostics(new DiagnosticOptions());
- }
+ assert(Diags.get() && "no DiagnosticsEngine was provided");
SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
@@ -2000,9 +1959,8 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
// Create the AST unit.
std::unique_ptr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
- ConfigureDiags(Diags, ArgBegin, ArgEnd, *AST, CaptureDiagnostics);
+ ConfigureDiags(Diags, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
- Diags = nullptr; // Zero out now to ease cleanup during crash recovery.
AST->FileSystemOpts = CI->getFileSystemOpts();
IntrusiveRefCntPtr<vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(*CI, *Diags);
@@ -2021,7 +1979,9 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
AST->Invocation = CI;
if (ForSerialization)
AST->WriterData.reset(new ASTWriterData());
- CI = nullptr; // Zero out now to ease cleanup during crash recovery.
+ // Zero out now to ease cleanup during crash recovery.
+ CI = nullptr;
+ Diags = nullptr;
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
@@ -2062,7 +2022,7 @@ bool ASTUnit::Reparse(ArrayRef<RemappedFile> RemappedFiles) {
// If we have a preamble file lying around, or if we might try to
// build a precompiled preamble, do so now.
- llvm::MemoryBuffer *OverrideMainBuffer = nullptr;
+ std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
if (!getPreambleFile(this).empty() || PreambleRebuildCounter > 0)
OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(*Invocation);
@@ -2073,8 +2033,8 @@ bool ASTUnit::Reparse(ArrayRef<RemappedFile> RemappedFiles) {
getDiagnostics().setNumWarnings(NumWarningsInPreamble);
// Parse the sources
- bool Result = Parse(OverrideMainBuffer);
-
+ bool Result = Parse(std::move(OverrideMainBuffer));
+
// If we're caching global code-completion results, and the top-level
// declarations have changed, clear out the code-completion cache.
if (!Result && ShouldCacheCodeCompletionResults &&
@@ -2366,6 +2326,10 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
// Set the language options appropriately.
LangOpts = *CCInvocation->getLangOpts();
+ // Spell-checking and warnings are wasteful during code-completion.
+ LangOpts.SpellChecking = false;
+ CCInvocation->getDiagnosticOpts().IgnoreWarnings = true;
+
std::unique_ptr<CompilerInstance> Clang(new CompilerInstance());
// Recover resources if we crash before exiting this method.
@@ -2427,7 +2391,7 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
// the use of the precompiled preamble if we're if the completion
// point is within the main file, after the end of the precompiled
// preamble.
- llvm::MemoryBuffer *OverrideMainBuffer = nullptr;
+ std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
if (!getPreambleFile(this).empty()) {
std::string CompleteFilePath(File);
llvm::sys::fs::UniqueID CompleteFileID;
@@ -2437,9 +2401,8 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
llvm::sys::fs::UniqueID MainID;
if (!llvm::sys::fs::getUniqueID(MainPath, MainID)) {
if (CompleteFileID == MainID && Line > 1)
- OverrideMainBuffer
- = getMainBufferWithPrecompiledPreamble(*CCInvocation, false,
- Line - 1);
+ OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(
+ *CCInvocation, false, Line - 1);
}
}
}
@@ -2447,14 +2410,15 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
// If the main file has been overridden due to the use of a preamble,
// make that override happen and introduce the preamble.
if (OverrideMainBuffer) {
- PreprocessorOpts.addRemappedFile(OriginalSourceFile, OverrideMainBuffer);
+ PreprocessorOpts.addRemappedFile(OriginalSourceFile,
+ OverrideMainBuffer.get());
PreprocessorOpts.PrecompiledPreambleBytes.first = Preamble.size();
PreprocessorOpts.PrecompiledPreambleBytes.second
= PreambleEndsAtStartOfLine;
PreprocessorOpts.ImplicitPCHInclude = getPreambleFile(this);
PreprocessorOpts.DisablePCHValidation = true;
-
- OwnedBuffers.push_back(OverrideMainBuffer);
+
+ OwnedBuffers.push_back(OverrideMainBuffer.release());
} else {
PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
PreprocessorOpts.PrecompiledPreambleBytes.second = false;
@@ -2821,7 +2785,8 @@ struct PCHLocatorInfo {
static bool PCHLocator(serialization::ModuleFile &M, void *UserData) {
PCHLocatorInfo &Info = *static_cast<PCHLocatorInfo*>(UserData);
switch (M.Kind) {
- case serialization::MK_Module:
+ case serialization::MK_ImplicitModule:
+ case serialization::MK_ExplicitModule:
return true; // skip dependencies.
case serialization::MK_PCH:
Info.Mod = &M;
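
Much of the ASTUnit.cpp churn above replaces manually deleted MemoryBuffer* and OnDiskData* members with std::unique_ptr and moves buffers into the functions that consume them, which is what lets each early-return delete disappear. A small generic sketch of the idea; Buffer and Unit are invented stand-ins, not clang types:

#include <memory>
#include <string>
#include <utility>

struct Buffer { std::string Data; };   // stand-in for llvm::MemoryBuffer

struct Unit {
  std::unique_ptr<Buffer> SavedMainFileBuffer;
  bool HasInvocation = false;

  // Taking the override buffer by value as a unique_ptr means every early
  // return frees it automatically; the explicit "delete OverrideMainBuffer"
  // blocks in the old code are no longer needed.
  bool parse(std::unique_ptr<Buffer> OverrideMainBuffer) {
    SavedMainFileBuffer.reset();
    if (!HasInvocation)
      return true;                       // buffer released here, no delete
    if (OverrideMainBuffer)
      SavedMainFileBuffer = std::move(OverrideMainBuffer);
    return false;
  }
};

int main() {
  Unit U;
  U.HasInvocation = true;
  U.parse(std::make_unique<Buffer>());   // ownership moves into parse()
}
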
diff --git a/lib/Frontend/CMakeLists.txt b/lib/Frontend/CMakeLists.txt
index 403cc42927a6..7c5fca54d1e1 100644
--- a/lib/Frontend/CMakeLists.txt
+++ b/lib/Frontend/CMakeLists.txt
@@ -1,6 +1,7 @@
add_subdirectory(Rewrite)
set(LLVM_LINK_COMPONENTS
+ BitReader
Option
Support
)
@@ -12,6 +13,7 @@ add_clang_library(clangFrontend
CacheTokens.cpp
ChainedDiagnosticConsumer.cpp
ChainedIncludesSource.cpp
+ CodeGenOptions.cpp
CompilerInstance.cpp
CompilerInvocation.cpp
CreateInvocationFromCommandLine.cpp
@@ -31,6 +33,7 @@ add_clang_library(clangFrontend
MultiplexConsumer.cpp
PrintPreprocessedOutput.cpp
SerializedDiagnosticPrinter.cpp
+ SerializedDiagnosticReader.cpp
TextDiagnostic.cpp
TextDiagnosticBuffer.cpp
TextDiagnosticPrinter.cpp
diff --git a/lib/Frontend/CacheTokens.cpp b/lib/Frontend/CacheTokens.cpp
index 14f7027e4687..d909d526b518 100644
--- a/lib/Frontend/CacheTokens.cpp
+++ b/lib/Frontend/CacheTokens.cpp
@@ -270,17 +270,17 @@ void PTHWriter::EmitToken(const Token& T) {
StringRef s(T.getLiteralData(), T.getLength());
// Get the string entry.
- llvm::StringMapEntry<OffsetOpt> *E = &CachedStrs.GetOrCreateValue(s);
+ auto &E = *CachedStrs.insert(std::make_pair(s, OffsetOpt())).first;
// If this is a new string entry, bump the PTH offset.
- if (!E->getValue().hasOffset()) {
- E->getValue().setOffset(CurStrOffset);
- StrEntries.push_back(E);
+ if (!E.second.hasOffset()) {
+ E.second.setOffset(CurStrOffset);
+ StrEntries.push_back(&E);
CurStrOffset += s.size() + 1;
}
// Emit the relative offset into the PTH file for the spelling string.
- Emit32(E->getValue().getOffset());
+ Emit32(E.second.getOffset());
}
// Emit the offset into the original source file of this token so that we
@@ -572,8 +572,10 @@ void clang::CacheTokens(Preprocessor &PP, llvm::raw_fd_ostream* OS) {
PTHWriter PW(*OS, PP);
// Install the 'stat' system call listener in the FileManager.
- StatListener *StatCache = new StatListener(PW.getPM());
- PP.getFileManager().addStatCache(StatCache, /*AtBeginning=*/true);
+ auto StatCacheOwner = llvm::make_unique<StatListener>(PW.getPM());
+ StatListener *StatCache = StatCacheOwner.get();
+ PP.getFileManager().addStatCache(std::move(StatCacheOwner),
+ /*AtBeginning=*/true);
// Lex through the entire file. This will populate SourceManager with
// all of the header information.
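
The CacheTokens.cpp hunk swaps the removed StringMap::GetOrCreateValue for plain insert(), which returns the entry whether or not it already existed. The same insert-if-absent pattern with standard containers (strings and offsets below are made up for illustration; insert's returned bool stands in for the hasOffset() check):

#include <iostream>
#include <string>
#include <unordered_map>

int main() {
  std::unordered_map<std::string, unsigned> Offsets;
  unsigned CurOffset = 0;

  for (const std::string S : {"foo", "bar", "foo"}) {
    // insert() returns {iterator, inserted?}; the iterator is valid either
    // way, so one call both creates and looks up the entry.
    auto Result = Offsets.insert({S, 0u});
    if (Result.second) {                 // first time we saw this string
      Result.first->second = CurOffset;
      CurOffset += S.size() + 1;         // leave room for a NUL terminator
    }
    std::cout << S << " -> " << Result.first->second << "\n";
  }
}
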
diff --git a/lib/Frontend/ChainedIncludesSource.cpp b/lib/Frontend/ChainedIncludesSource.cpp
index e6e73ac963fb..cb260b4f4c69 100644
--- a/lib/Frontend/ChainedIncludesSource.cpp
+++ b/lib/Frontend/ChainedIncludesSource.cpp
@@ -74,7 +74,7 @@ protected:
static ASTReader *
createASTReader(CompilerInstance &CI, StringRef pchFile,
- SmallVectorImpl<llvm::MemoryBuffer *> &memBufs,
+ SmallVectorImpl<std::unique_ptr<llvm::MemoryBuffer>> &MemBufs,
SmallVectorImpl<std::string> &bufNames,
ASTDeserializationListener *deserialListener = nullptr) {
Preprocessor &PP = CI.getPreprocessor();
@@ -83,7 +83,7 @@ createASTReader(CompilerInstance &CI, StringRef pchFile,
/*DisableValidation=*/true));
for (unsigned ti = 0; ti < bufNames.size(); ++ti) {
StringRef sr(bufNames[ti]);
- Reader->addInMemoryBuffer(sr, memBufs[ti]);
+ Reader->addInMemoryBuffer(sr, std::move(MemBufs[ti]));
}
Reader->setDeserializationListener(deserialListener);
switch (Reader->ReadAST(pchFile, serialization::MK_PCH, SourceLocation(),
@@ -118,7 +118,7 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
IntrusiveRefCntPtr<ChainedIncludesSource> source(new ChainedIncludesSource());
InputKind IK = CI.getFrontendOpts().Inputs[0].getKind();
- SmallVector<llvm::MemoryBuffer *, 4> serialBufs;
+ SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 4> SerialBufs;
SmallVector<std::string, 4> serialBufNames;
for (unsigned i = 0, e = includes.size(); i != e; ++i) {
@@ -158,12 +158,12 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
SmallVector<char, 256> serialAST;
llvm::raw_svector_ostream OS(serialAST);
- std::unique_ptr<ASTConsumer> consumer;
- consumer.reset(new PCHGenerator(Clang->getPreprocessor(), "-", nullptr,
- /*isysroot=*/"", &OS));
+ auto consumer =
+ llvm::make_unique<PCHGenerator>(Clang->getPreprocessor(), "-", nullptr,
+ /*isysroot=*/"", &OS);
Clang->getASTContext().setASTMutationListener(
consumer->GetASTMutationListener());
- Clang->setASTConsumer(consumer.release());
+ Clang->setASTConsumer(std::move(consumer));
Clang->createSema(TU_Prefix, nullptr);
if (firstInclude) {
@@ -171,20 +171,21 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
PP.getBuiltinInfo().InitializeBuiltins(PP.getIdentifierTable(),
PP.getLangOpts());
} else {
- assert(!serialBufs.empty());
- SmallVector<llvm::MemoryBuffer *, 4> bufs;
+ assert(!SerialBufs.empty());
+ SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 4> Bufs;
// TODO: Pass through the existing MemoryBuffer instances instead of
// allocating new ones.
- for (auto *SB : serialBufs)
- bufs.push_back(llvm::MemoryBuffer::getMemBuffer(SB->getBuffer()));
+ for (auto &SB : SerialBufs)
+ Bufs.push_back(llvm::MemoryBuffer::getMemBuffer(SB->getBuffer()));
std::string pchName = includes[i-1];
llvm::raw_string_ostream os(pchName);
os << ".pch" << i-1;
serialBufNames.push_back(os.str());
IntrusiveRefCntPtr<ASTReader> Reader;
- Reader = createASTReader(*Clang, pchName, bufs, serialBufNames,
- Clang->getASTConsumer().GetASTDeserializationListener());
+ Reader = createASTReader(
+ *Clang, pchName, Bufs, serialBufNames,
+ Clang->getASTConsumer().GetASTDeserializationListener());
if (!Reader)
return nullptr;
Clang->setModuleManager(Reader);
@@ -196,14 +197,14 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
ParseAST(Clang->getSema());
Clang->getDiagnosticClient().EndSourceFile();
- serialBufs.push_back(llvm::MemoryBuffer::getMemBufferCopy(OS.str()));
+ SerialBufs.push_back(llvm::MemoryBuffer::getMemBufferCopy(OS.str()));
source->CIs.push_back(Clang.release());
}
- assert(!serialBufs.empty());
+ assert(!SerialBufs.empty());
std::string pchName = includes.back() + ".pch-final";
serialBufNames.push_back(pchName);
- Reader = createASTReader(CI, pchName, serialBufs, serialBufNames);
+ Reader = createASTReader(CI, pchName, SerialBufs, serialBufNames);
if (!Reader)
return nullptr;
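
ChainedIncludesSource.cpp above moves from a SmallVector of raw llvm::MemoryBuffer pointers to a vector of std::unique_ptr, so the loop has to iterate by reference (for (auto &SB : SerialBufs)) instead of copying the owning pointers. A generic sketch of that container-of-owners pattern, with Buf as a made-up stand-in:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Buf { std::string Contents; };   // stand-in for llvm::MemoryBuffer

int main() {
  std::vector<std::unique_ptr<Buf>> SerialBufs;
  SerialBufs.push_back(std::make_unique<Buf>(Buf{"first"}));
  SerialBufs.push_back(std::make_unique<Buf>(Buf{"second"}));

  // unique_ptr is move-only, so elements cannot be copied out of the
  // vector; iterate by reference (auto &) to read them in place.
  for (const auto &SB : SerialBufs)
    std::cout << SB->Contents << "\n";
}
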
diff --git a/lib/Frontend/CodeGenOptions.cpp b/lib/Frontend/CodeGenOptions.cpp
new file mode 100644
index 000000000000..75ee47f86806
--- /dev/null
+++ b/lib/Frontend/CodeGenOptions.cpp
@@ -0,0 +1,24 @@
+//===--- CodeGenOptions.cpp -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/CodeGenOptions.h"
+#include <string.h>
+
+namespace clang {
+
+CodeGenOptions::CodeGenOptions() {
+#define CODEGENOPT(Name, Bits, Default) Name = Default;
+#define ENUM_CODEGENOPT(Name, Type, Bits, Default) set##Name(Default);
+#include "clang/Frontend/CodeGenOptions.def"
+
+ RelocationModel = "pic";
+ memcpy(CoverageVersion, "402*", 4);
+}
+
+} // end namespace clang
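
The new CodeGenOptions.cpp relies on clang's CodeGenOptions.def X-macro header: one option list is expanded once into field declarations and again into the default-initializing statements seen above. A self-contained sketch of that technique, with the list folded into a single macro rather than a separate .def file and with option names invented for illustration:

#include <iostream>

// One list of (name, default) pairs, expanded twice with different macros.
#define MY_CODEGEN_OPTIONS(OPT) \
  OPT(OptimizationLevel, 2)     \
  OPT(EmitDebugInfo, 0)

struct Options {
#define DECLARE_OPT(Name, Default) unsigned Name;
  MY_CODEGEN_OPTIONS(DECLARE_OPT)
#undef DECLARE_OPT

  Options() {
#define INIT_OPT(Name, Default) Name = Default;
    MY_CODEGEN_OPTIONS(INIT_OPT)
#undef INIT_OPT
  }
};

int main() {
  Options O;
  std::cout << O.OptimizationLevel << " " << O.EmitDebugInfo << "\n"; // 2 0
}
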
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index 6af920d9fd78..93a34b722274 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -101,14 +101,18 @@ void CompilerInstance::setSema(Sema *S) {
TheSema.reset(S);
}
-void CompilerInstance::setASTConsumer(ASTConsumer *Value) {
- Consumer.reset(Value);
+void CompilerInstance::setASTConsumer(std::unique_ptr<ASTConsumer> Value) {
+ Consumer = std::move(Value);
}
void CompilerInstance::setCodeCompletionConsumer(CodeCompleteConsumer *Value) {
CompletionConsumer.reset(Value);
}
-
+
+std::unique_ptr<Sema> CompilerInstance::takeSema() {
+ return std::move(TheSema);
+}
+
IntrusiveRefCntPtr<ASTReader> CompilerInstance::getModuleManager() const {
return ModuleManager;
}
@@ -130,52 +134,48 @@ void CompilerInstance::setModuleDepCollector(
static void SetUpDiagnosticLog(DiagnosticOptions *DiagOpts,
const CodeGenOptions *CodeGenOpts,
DiagnosticsEngine &Diags) {
- std::string ErrorInfo;
- bool OwnsStream = false;
+ std::error_code EC;
+ std::unique_ptr<raw_ostream> StreamOwner;
raw_ostream *OS = &llvm::errs();
if (DiagOpts->DiagnosticLogFile != "-") {
// Create the output stream.
- llvm::raw_fd_ostream *FileOS(new llvm::raw_fd_ostream(
- DiagOpts->DiagnosticLogFile.c_str(), ErrorInfo,
- llvm::sys::fs::F_Append | llvm::sys::fs::F_Text));
- if (!ErrorInfo.empty()) {
+ auto FileOS = llvm::make_unique<llvm::raw_fd_ostream>(
+ DiagOpts->DiagnosticLogFile, EC,
+ llvm::sys::fs::F_Append | llvm::sys::fs::F_Text);
+ if (EC) {
Diags.Report(diag::warn_fe_cc_log_diagnostics_failure)
- << DiagOpts->DiagnosticLogFile << ErrorInfo;
+ << DiagOpts->DiagnosticLogFile << EC.message();
} else {
FileOS->SetUnbuffered();
FileOS->SetUseAtomicWrites(true);
- OS = FileOS;
- OwnsStream = true;
+ OS = FileOS.get();
+ StreamOwner = std::move(FileOS);
}
}
// Chain in the diagnostic client which will log the diagnostics.
- LogDiagnosticPrinter *Logger = new LogDiagnosticPrinter(*OS, DiagOpts,
- OwnsStream);
+ auto Logger = llvm::make_unique<LogDiagnosticPrinter>(*OS, DiagOpts,
+ std::move(StreamOwner));
if (CodeGenOpts)
Logger->setDwarfDebugFlags(CodeGenOpts->DwarfDebugFlags);
- Diags.setClient(new ChainedDiagnosticConsumer(Diags.takeClient(), Logger));
+ assert(Diags.ownsClient());
+ Diags.setClient(
+ new ChainedDiagnosticConsumer(Diags.takeClient(), std::move(Logger)));
}
static void SetupSerializedDiagnostics(DiagnosticOptions *DiagOpts,
DiagnosticsEngine &Diags,
StringRef OutputFile) {
- std::string ErrorInfo;
- std::unique_ptr<llvm::raw_fd_ostream> OS;
- OS.reset(new llvm::raw_fd_ostream(OutputFile.str().c_str(), ErrorInfo,
- llvm::sys::fs::F_None));
+ auto SerializedConsumer =
+ clang::serialized_diags::create(OutputFile, DiagOpts);
- if (!ErrorInfo.empty()) {
- Diags.Report(diag::warn_fe_serialized_diag_failure)
- << OutputFile << ErrorInfo;
- return;
+ if (Diags.ownsClient()) {
+ Diags.setClient(new ChainedDiagnosticConsumer(
+ Diags.takeClient(), std::move(SerializedConsumer)));
+ } else {
+ Diags.setClient(new ChainedDiagnosticConsumer(
+ Diags.getClient(), std::move(SerializedConsumer)));
}
-
- DiagnosticConsumer *SerializedConsumer =
- clang::serialized_diags::create(OS.release(), DiagOpts);
-
- Diags.setClient(new ChainedDiagnosticConsumer(Diags.takeClient(),
- SerializedConsumer));
}
void CompilerInstance::createDiagnostics(DiagnosticConsumer *Client,
@@ -371,6 +371,14 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
AttachHeaderIncludeGen(*PP, /*ShowAllHeaders=*/false, /*OutputPath=*/"",
/*ShowDepth=*/true, /*MSStyle=*/true);
}
+
+ // Load all explicitly-specified module map files.
+ for (const auto &Filename : getFrontendOpts().ModuleMapFiles) {
+ if (auto *File = getFileManager().getFile(Filename))
+ PP->getHeaderSearchInfo().loadModuleMapFile(File, /*IsSystem*/false);
+ else
+ getDiagnostics().Report(diag::err_module_map_not_found) << Filename;
+ }
}
// ASTContext
@@ -569,17 +577,14 @@ CompilerInstance::createOutputFile(StringRef OutputPath,
StringRef Extension,
bool UseTemporary,
bool CreateMissingDirectories) {
- std::string Error, OutputPathName, TempPathName;
- llvm::raw_fd_ostream *OS = createOutputFile(OutputPath, Error, Binary,
- RemoveFileOnSignal,
- InFile, Extension,
- UseTemporary,
- CreateMissingDirectories,
- &OutputPathName,
- &TempPathName);
+ std::string OutputPathName, TempPathName;
+ std::error_code EC;
+ llvm::raw_fd_ostream *OS = createOutputFile(
+ OutputPath, EC, Binary, RemoveFileOnSignal, InFile, Extension,
+ UseTemporary, CreateMissingDirectories, &OutputPathName, &TempPathName);
if (!OS) {
- getDiagnostics().Report(diag::err_fe_unable_to_open_output)
- << OutputPath << Error;
+ getDiagnostics().Report(diag::err_fe_unable_to_open_output) << OutputPath
+ << EC.message();
return nullptr;
}
@@ -591,17 +596,11 @@ CompilerInstance::createOutputFile(StringRef OutputPath,
return OS;
}
-llvm::raw_fd_ostream *
-CompilerInstance::createOutputFile(StringRef OutputPath,
- std::string &Error,
- bool Binary,
- bool RemoveFileOnSignal,
- StringRef InFile,
- StringRef Extension,
- bool UseTemporary,
- bool CreateMissingDirectories,
- std::string *ResultPathName,
- std::string *TempPathName) {
+llvm::raw_fd_ostream *CompilerInstance::createOutputFile(
+ StringRef OutputPath, std::error_code &Error, bool Binary,
+ bool RemoveFileOnSignal, StringRef InFile, StringRef Extension,
+ bool UseTemporary, bool CreateMissingDirectories,
+ std::string *ResultPathName, std::string *TempPathName) {
assert((!CreateMissingDirectories || UseTemporary) &&
"CreateMissingDirectories is only allowed when using temporary files");
@@ -670,9 +669,9 @@ CompilerInstance::createOutputFile(StringRef OutputPath,
if (!OS) {
OSFile = OutFile;
OS.reset(new llvm::raw_fd_ostream(
- OSFile.c_str(), Error,
+ OSFile, Error,
(Binary ? llvm::sys::fs::F_None : llvm::sys::fs::F_Text)));
- if (!Error.empty())
+ if (Error)
return nullptr;
}
@@ -705,7 +704,8 @@ bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
Kind = Input.isSystem() ? SrcMgr::C_System : SrcMgr::C_User;
if (Input.isBuffer()) {
- SourceMgr.setMainFileID(SourceMgr.createFileID(Input.getBuffer(), Kind));
+ SourceMgr.setMainFileID(SourceMgr.createFileID(
+ std::unique_ptr<llvm::MemoryBuffer>(Input.getBuffer()), Kind));
assert(!SourceMgr.getMainFileID().isInvalid() &&
"Couldn't establish MainFileID!");
return true;
@@ -727,14 +727,14 @@ bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
// pick up the correct size, and simply override their contents as we do for
// STDIN.
if (File->isNamedPipe()) {
- std::string ErrorStr;
- if (llvm::MemoryBuffer *MB =
- FileMgr.getBufferForFile(File, &ErrorStr, /*isVolatile=*/true)) {
+ auto MB = FileMgr.getBufferForFile(File, /*isVolatile=*/true);
+ if (MB) {
// Create a new virtual file that will have the correct size.
- File = FileMgr.getVirtualFile(InputFile, MB->getBufferSize(), 0);
- SourceMgr.overrideFileContents(File, MB);
+ File = FileMgr.getVirtualFile(InputFile, (*MB)->getBufferSize(), 0);
+ SourceMgr.overrideFileContents(File, std::move(*MB));
} else {
- Diags.Report(diag::err_cannot_open_file) << InputFile << ErrorStr;
+ Diags.Report(diag::err_cannot_open_file) << InputFile
+ << MB.getError().message();
return false;
}
}
@@ -754,7 +754,7 @@ bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
SB->getBufferSize(), 0);
SourceMgr.setMainFileID(
SourceMgr.createFileID(File, SourceLocation(), Kind));
- SourceMgr.overrideFileContents(File, SB.release());
+ SourceMgr.overrideFileContents(File, std::move(SB));
}
assert(!SourceMgr.getMainFileID().isInvalid() &&
@@ -802,8 +802,9 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
llvm::EnableStatistics();
for (unsigned i = 0, e = getFrontendOpts().Inputs.size(); i != e; ++i) {
- // Reset the ID tables if we are reusing the SourceManager.
- if (hasSourceManager())
+ // Reset the ID tables if we are reusing the SourceManager and parsing
+ // regular files.
+ if (hasSourceManager() && !Act.isModelParsingAction())
getSourceManager().clearIDTables();
if (Act.BeginSourceFile(*this, getFrontendOpts().Inputs[i])) {
@@ -951,17 +952,22 @@ static bool compileModuleImpl(CompilerInstance &ImportingInstance,
FrontendOpts.Inputs.push_back(
FrontendInputFile("__inferred_module.map", IK));
- llvm::MemoryBuffer *ModuleMapBuffer =
+ std::unique_ptr<llvm::MemoryBuffer> ModuleMapBuffer =
llvm::MemoryBuffer::getMemBuffer(InferredModuleMapContent);
ModuleMapFile = Instance.getFileManager().getVirtualFile(
"__inferred_module.map", InferredModuleMapContent.size(), 0);
- SourceMgr.overrideFileContents(ModuleMapFile, ModuleMapBuffer);
+ SourceMgr.overrideFileContents(ModuleMapFile, std::move(ModuleMapBuffer));
}
- // Construct a module-generating action. Passing through Module->ModuleMap is
+ // Construct a module-generating action. Passing through the module map is
// safe because the FileManager is shared between the compiler instances.
- GenerateModuleAction CreateModuleAction(Module->ModuleMap, Module->IsSystem);
-
+ GenerateModuleAction CreateModuleAction(
+ ModMap.getModuleMapFileForUniquing(Module), Module->IsSystem);
+
+ ImportingInstance.getDiagnostics().Report(ImportLoc,
+ diag::remark_module_build)
+ << Module->Name << ModuleFileName;
+
// Execute the action to actually build the module in-place. Use a separate
// thread so that we get a stack large enough.
const unsigned ThreadStackSize = 8 << 20;
@@ -969,6 +975,10 @@ static bool compileModuleImpl(CompilerInstance &ImportingInstance,
CRC.RunSafelyOnThread([&]() { Instance.ExecuteAction(CreateModuleAction); },
ThreadStackSize);
+ ImportingInstance.getDiagnostics().Report(ImportLoc,
+ diag::remark_module_build_done)
+ << Module->Name;
+
// Delete the temporary module map file.
// FIXME: Even though we're executing under crash protection, it would still
// be nice to do this with RemoveFileOnSignal when we can. However, that
@@ -988,9 +998,10 @@ static bool compileAndLoadModule(CompilerInstance &ImportingInstance,
SourceLocation ImportLoc,
SourceLocation ModuleNameLoc, Module *Module,
StringRef ModuleFileName) {
+ DiagnosticsEngine &Diags = ImportingInstance.getDiagnostics();
+
auto diagnoseBuildFailure = [&] {
- ImportingInstance.getDiagnostics().Report(ModuleNameLoc,
- diag::err_module_not_built)
+ Diags.Report(ModuleNameLoc, diag::err_module_not_built)
<< Module->Name << SourceRange(ImportLoc, ModuleNameLoc);
};
@@ -1004,6 +1015,8 @@ static bool compileAndLoadModule(CompilerInstance &ImportingInstance,
llvm::LockFileManager Locked(ModuleFileName);
switch (Locked) {
case llvm::LockFileManager::LFS_Error:
+ Diags.Report(ModuleNameLoc, diag::err_module_lock_failure)
+ << Module->Name;
return false;
case llvm::LockFileManager::LFS_Owned:
@@ -1027,7 +1040,7 @@ static bool compileAndLoadModule(CompilerInstance &ImportingInstance,
// Try to read the module file, now that we've compiled it.
ASTReader::ASTReadResult ReadResult =
ImportingInstance.getModuleManager()->ReadAST(
- ModuleFileName, serialization::MK_Module, ImportLoc,
+ ModuleFileName, serialization::MK_ImplicitModule, ImportLoc,
ModuleLoadCapabilities);
if (ReadResult == ASTReader::OutOfDate &&
@@ -1038,6 +1051,10 @@ static bool compileAndLoadModule(CompilerInstance &ImportingInstance,
continue;
} else if (ReadResult == ASTReader::Missing) {
diagnoseBuildFailure();
+ } else if (ReadResult != ASTReader::Success &&
+ !Diags.hasErrorOccurred()) {
+ // The ASTReader didn't diagnose the error, so conservatively report it.
+ diagnoseBuildFailure();
}
return ReadResult == ASTReader::Success;
}
@@ -1131,9 +1148,8 @@ static void checkConfigMacro(Preprocessor &PP, StringRef ConfigMacro,
/// \brief Write a new timestamp file with the given path.
static void writeTimestampFile(StringRef TimestampFile) {
- std::string ErrorInfo;
- llvm::raw_fd_ostream Out(TimestampFile.str().c_str(), ErrorInfo,
- llvm::sys::fs::F_None);
+ std::error_code EC;
+ llvm::raw_fd_ostream Out(TimestampFile.str(), EC, llvm::sys::fs::F_None);
}
/// \brief Prune the module cache of modules that haven't been accessed in
@@ -1251,6 +1267,65 @@ void CompilerInstance::createModuleManager() {
}
}
+bool CompilerInstance::loadModuleFile(StringRef FileName) {
+ // Helper to recursively read the module names for all modules we're adding.
+ // We mark these as known and redirect any attempt to load that module to
+ // the files we were handed.
+ struct ReadModuleNames : ASTReaderListener {
+ CompilerInstance &CI;
+ std::vector<StringRef> ModuleFileStack;
+ bool Failed;
+ bool TopFileIsModule;
+
+ ReadModuleNames(CompilerInstance &CI)
+ : CI(CI), Failed(false), TopFileIsModule(false) {}
+
+ bool needsImportVisitation() const override { return true; }
+
+ void visitImport(StringRef FileName) override {
+ ModuleFileStack.push_back(FileName);
+ if (ASTReader::readASTFileControlBlock(FileName, CI.getFileManager(),
+ *this)) {
+ CI.getDiagnostics().Report(SourceLocation(),
+ diag::err_module_file_not_found)
+ << FileName;
+ // FIXME: Produce a note stack explaining how we got here.
+ Failed = true;
+ }
+ ModuleFileStack.pop_back();
+ }
+
+ void ReadModuleName(StringRef ModuleName) override {
+ if (ModuleFileStack.size() == 1)
+ TopFileIsModule = true;
+
+ auto &ModuleFile = CI.ModuleFileOverrides[ModuleName];
+ if (!ModuleFile.empty() &&
+ CI.getFileManager().getFile(ModuleFile) !=
+ CI.getFileManager().getFile(ModuleFileStack.back()))
+ CI.getDiagnostics().Report(SourceLocation(),
+ diag::err_conflicting_module_files)
+ << ModuleName << ModuleFile << ModuleFileStack.back();
+ ModuleFile = ModuleFileStack.back();
+ }
+ } RMN(*this);
+
+ RMN.visitImport(FileName);
+
+ if (RMN.Failed)
+ return false;
+
+ // If we never found a module name for the top file, then it's not a module,
+ // it's a PCH or preamble or something.
+ if (!RMN.TopFileIsModule) {
+ getDiagnostics().Report(SourceLocation(), diag::err_module_file_not_module)
+ << FileName;
+ return false;
+ }
+
+ return true;
+}
+
ModuleLoadResult
CompilerInstance::loadModule(SourceLocation ImportLoc,
ModuleIdPath Path,
@@ -1265,7 +1340,8 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
// when both the preprocessor and parser see the same import declaration.
if (!ImportLoc.isInvalid() && LastModuleImportLoc == ImportLoc) {
// Make the named module visible.
- if (LastModuleImportResult && ModuleName != getLangOpts().CurrentModule)
+ if (LastModuleImportResult && ModuleName != getLangOpts().CurrentModule &&
+ ModuleName != getLangOpts().ImplementationOfModule)
ModuleManager->makeModuleVisible(LastModuleImportResult, Visibility,
ImportLoc, /*Complain=*/false);
return LastModuleImportResult;
@@ -1279,7 +1355,8 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
if (Known != KnownModules.end()) {
// Retrieve the cached top-level module.
Module = Known->second;
- } else if (ModuleName == getLangOpts().CurrentModule) {
+ } else if (ModuleName == getLangOpts().CurrentModule ||
+ ModuleName == getLangOpts().ImplementationOfModule) {
// This is the module we're building.
Module = PP->getHeaderSearchInfo().lookupModule(ModuleName);
Known = KnownModules.insert(std::make_pair(Path[0].first, Module)).first;
@@ -1294,8 +1371,12 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
return ModuleLoadResult();
}
+ auto Override = ModuleFileOverrides.find(ModuleName);
+ bool Explicit = Override != ModuleFileOverrides.end();
+
std::string ModuleFileName =
- PP->getHeaderSearchInfo().getModuleFileName(Module);
+ Explicit ? Override->second
+ : PP->getHeaderSearchInfo().getModuleFileName(Module);
// If we don't already have an ASTReader, create one now.
if (!ModuleManager)
@@ -1311,14 +1392,24 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
Listener->attachToASTReader(*ModuleManager);
// Try to load the module file.
- unsigned ARRFlags = ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing;
- switch (ModuleManager->ReadAST(ModuleFileName, serialization::MK_Module,
+ unsigned ARRFlags =
+ Explicit ? 0 : ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing;
+ switch (ModuleManager->ReadAST(ModuleFileName,
+ Explicit ? serialization::MK_ExplicitModule
+ : serialization::MK_ImplicitModule,
ImportLoc, ARRFlags)) {
case ASTReader::Success:
break;
case ASTReader::OutOfDate:
case ASTReader::Missing: {
+ if (Explicit) {
+ // ReadAST has already complained for us.
+ ModuleLoader::HadFatalFailure = true;
+ KnownModules[Path[0].first] = nullptr;
+ return ModuleLoadResult();
+ }
+
// The module file is missing or out-of-date. Build it.
assert(Module && "missing module file");
// Check whether there is a cycle in the module graph.
@@ -1342,9 +1433,6 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
return ModuleLoadResult();
}
- getDiagnostics().Report(ImportLoc, diag::remark_module_build)
- << ModuleName << ModuleFileName;
-
// Check whether we have already attempted to build this module (but
// failed).
if (getPreprocessorOpts().FailedModules &&
@@ -1359,6 +1447,8 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
// Try to compile and then load the module.
if (!compileAndLoadModule(*this, ImportLoc, ModuleNameLoc, Module,
ModuleFileName)) {
+ assert(getDiagnostics().hasErrorOccurred() &&
+ "undiagnosed error in compileAndLoadModule");
if (getPreprocessorOpts().FailedModules)
getPreprocessorOpts().FailedModules->addFailed(ModuleName);
KnownModules[Path[0].first] = nullptr;
@@ -1448,6 +1538,10 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
Module = Sub;
}
}
+
+ // Don't make the module visible if we are in the implementation.
+ if (ModuleName == getLangOpts().ImplementationOfModule)
+ return ModuleLoadResult(Module, false);
// Make the named module visible, if it's not already part of the module
// we are parsing.
@@ -1468,7 +1562,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
// Check whether this module is available.
clang::Module::Requirement Requirement;
- clang::Module::HeaderDirective MissingHeader;
+ clang::Module::UnresolvedHeaderDirective MissingHeader;
if (!Module->isAvailable(getLangOpts(), getTarget(), Requirement,
MissingHeader)) {
if (MissingHeader.FileNameLoc.isValid()) {
@@ -1497,9 +1591,16 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
Module, ImportLoc);
}
+ // Determine whether we're in the #include buffer for a module. The #includes
+ // in that buffer do not qualify as module imports; they're just an
+ // implementation detail of us building the module.
+ bool IsInModuleIncludes = !getLangOpts().CurrentModule.empty() &&
+ getSourceManager().getFileID(ImportLoc) ==
+ getSourceManager().getMainFileID();
+
// If this module import was due to an inclusion directive, create an
// implicit import declaration to capture it in the AST.
- if (IsInclusionDirective && hasASTContext()) {
+ if (IsInclusionDirective && hasASTContext() && !IsInModuleIncludes) {
TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
ImportLoc, Module,
@@ -1602,3 +1703,4 @@ CompilerInstance::lookupMissingImports(StringRef Name,
return false;
}
+void CompilerInstance::resetAndLeakSema() { BuryPointer(takeSema()); }
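The writeTimestampFile hunk above moves llvm::raw_fd_ostream from the old std::string ErrorInfo constructor to the std::error_code overload. A minimal standalone sketch of that error-handling pattern follows; writeMarkerFile and its message are illustrative, not part of clang:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <system_error>

// Open a file for writing; failures are reported through std::error_code.
static bool writeMarkerFile(llvm::StringRef Path) {
  std::error_code EC;
  llvm::raw_fd_ostream Out(Path, EC, llvm::sys::fs::F_Text);
  if (EC) {
    llvm::errs() << "cannot open '" << Path << "': " << EC.message() << "\n";
    return false;
  }
  Out << "ok\n";
  return true;
}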
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index ce61a4653d2e..54025b06557a 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -10,6 +10,7 @@
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/Version.h"
+#include "clang/Config/config.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Util.h"
@@ -19,8 +20,8 @@
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Serialization/ASTReader.h"
#include "llvm/ADT/Hashing.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
@@ -215,6 +216,8 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
Opts.ShowCheckerHelp = Args.hasArg(OPT_analyzer_checker_help);
+ Opts.DisableAllChecks = Args.hasArg(OPT_analyzer_disable_all_checks);
+
Opts.visualizeExplodedGraphWithGraphViz =
Args.hasArg(OPT_analyzer_viz_egraph_graphviz);
Opts.visualizeExplodedGraphWithUbiGraph =
@@ -322,21 +325,38 @@ GenerateOptimizationRemarkRegex(DiagnosticsEngine &Diags, ArgList &Args,
return Pattern;
}
+static void parseSanitizerKinds(StringRef FlagName,
+ const std::vector<std::string> &Sanitizers,
+ DiagnosticsEngine &Diags, SanitizerSet &S) {
+ for (const auto &Sanitizer : Sanitizers) {
+ SanitizerKind K = llvm::StringSwitch<SanitizerKind>(Sanitizer)
+#define SANITIZER(NAME, ID) .Case(NAME, SanitizerKind::ID)
+#include "clang/Basic/Sanitizers.def"
+ .Default(SanitizerKind::Unknown);
+ if (K == SanitizerKind::Unknown)
+ Diags.Report(diag::err_drv_invalid_value) << FlagName << Sanitizer;
+ else
+ S.set(K, true);
+ }
+}
+
static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
DiagnosticsEngine &Diags,
const TargetOptions &TargetOpts) {
using namespace options;
bool Success = true;
- Opts.OptimizationLevel = getOptimizationLevel(Args, IK, Diags);
+ unsigned OptimizationLevel = getOptimizationLevel(Args, IK, Diags);
// TODO: This could be done in Driver
unsigned MaxOptLevel = 3;
- if (Opts.OptimizationLevel > MaxOptLevel) {
- // If the optimization level is not supported, fall back on the default optimization
+ if (OptimizationLevel > MaxOptLevel) {
+ // If the optimization level is not supported, fall back on the default
+ // optimization
Diags.Report(diag::warn_drv_optimization_value)
<< Args.getLastArg(OPT_O)->getAsString(Args) << "-O" << MaxOptLevel;
- Opts.OptimizationLevel = MaxOptLevel;
+ OptimizationLevel = MaxOptLevel;
}
+ Opts.OptimizationLevel = OptimizationLevel;
// We must always run at least the always inlining pass.
Opts.setInlining(
@@ -399,6 +419,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.SampleProfileFile = Args.getLastArgValue(OPT_fprofile_sample_use_EQ);
Opts.ProfileInstrGenerate = Args.hasArg(OPT_fprofile_instr_generate);
Opts.InstrProfileInput = Args.getLastArgValue(OPT_fprofile_instr_use_EQ);
+ Opts.CoverageMapping = Args.hasArg(OPT_fcoverage_mapping);
+ Opts.DumpCoverageMapping = Args.hasArg(OPT_dump_coverage_mapping);
Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
Opts.CUDAIsDevice = Args.hasArg(OPT_fcuda_is_device);
@@ -413,16 +435,19 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.LessPreciseFPMAD = Args.hasArg(OPT_cl_mad_enable);
Opts.LimitFloatPrecision = Args.getLastArgValue(OPT_mlimit_float_precision);
Opts.NoInfsFPMath = (Args.hasArg(OPT_menable_no_infinities) ||
- Args.hasArg(OPT_cl_finite_math_only)||
+ Args.hasArg(OPT_cl_finite_math_only) ||
Args.hasArg(OPT_cl_fast_relaxed_math));
Opts.NoNaNsFPMath = (Args.hasArg(OPT_menable_no_nans) ||
- Args.hasArg(OPT_cl_finite_math_only)||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_finite_math_only) ||
Args.hasArg(OPT_cl_fast_relaxed_math));
+ Opts.NoSignedZeros = Args.hasArg(OPT_cl_no_signed_zeros);
Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss);
Opts.BackendOptions = Args.getAllArgValues(OPT_backend_option);
Opts.NumRegisterParameters = getLastArgIntValue(Args, OPT_mregparm, 0, Diags);
Opts.NoGlobalMerge = Args.hasArg(OPT_mno_global_merge);
Opts.NoExecStack = Args.hasArg(OPT_mno_exec_stack);
+ Opts.FatalWarnings = Args.hasArg(OPT_massembler_fatal_warnings);
Opts.EnableSegmentedStacks = Args.hasArg(OPT_split_stacks);
Opts.RelaxAll = Args.hasArg(OPT_mrelax_all);
Opts.OmitLeafFramePointer = Args.hasArg(OPT_momit_leaf_frame_pointer);
@@ -435,6 +460,11 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
Opts.RelocationModel = Args.getLastArgValue(OPT_mrelocation_model, "pic");
+ Opts.ThreadModel = Args.getLastArgValue(OPT_mthread_model, "posix");
+ if (Opts.ThreadModel != "posix" && Opts.ThreadModel != "single")
+ Diags.Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_mthread_model)->getAsString(Args)
+ << Opts.ThreadModel;
Opts.TrapFuncName = Args.getLastArgValue(OPT_ftrap_function_EQ);
Opts.UseInitArray = Args.hasArg(OPT_fuse_init_array);
@@ -442,6 +472,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
OPT_fno_function_sections, false);
Opts.DataSections = Args.hasFlag(OPT_fdata_sections,
OPT_fno_data_sections, false);
+ Opts.MergeFunctions = Args.hasArg(OPT_fmerge_functions);
Opts.VectorizeBB = Args.hasArg(OPT_vectorize_slp_aggressive);
Opts.VectorizeLoop = Args.hasArg(OPT_vectorize_loops);
@@ -449,13 +480,12 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.MainFileName = Args.getLastArgValue(OPT_main_file_name);
Opts.VerifyModule = !Args.hasArg(OPT_disable_llvm_verifier);
- Opts.SanitizeRecover = !Args.hasArg(OPT_fno_sanitize_recover);
Opts.DisableGCov = Args.hasArg(OPT_test_coverage);
Opts.EmitGcovArcs = Args.hasArg(OPT_femit_coverage_data);
Opts.EmitGcovNotes = Args.hasArg(OPT_femit_coverage_notes);
if (Opts.EmitGcovArcs || Opts.EmitGcovNotes) {
- Opts.CoverageFile = Args.getLastArgValue(OPT_coverage_file);
+ Opts.CoverageFile = Args.getLastArgValue(OPT_coverage_file);
Opts.CoverageExtraChecksum = Args.hasArg(OPT_coverage_cfg_checksum);
Opts.CoverageNoFunctionNamesInData =
Args.hasArg(OPT_coverage_no_function_names_in_data);
@@ -477,7 +507,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.CompressDebugSections = Args.hasArg(OPT_compress_debug_sections);
Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
Opts.LinkBitcodeFile = Args.getLastArgValue(OPT_mlink_bitcode_file);
- Opts.SanitizerBlacklistFile = Args.getLastArgValue(OPT_fsanitize_blacklist);
+ Opts.SanitizeCoverage =
+ getLastArgIntValue(Args, OPT_fsanitize_coverage, 0, Diags);
Opts.SanitizeMemoryTrackOrigins =
getLastArgIntValue(Args, OPT_fsanitize_memory_track_origins_EQ, 0, Diags);
Opts.SanitizeUndefinedTrapOnError =
@@ -566,11 +597,25 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
NeedLocTracking = true;
}
- // If the user requested one of the flags in the -Rpass family, make sure
- // that the backend tracks source location information.
+ // If the user requested to use a sample profile for PGO, then the
+ // backend will need to track source location information so the profile
+ // can be incorporated into the IR.
+ if (!Opts.SampleProfileFile.empty())
+ NeedLocTracking = true;
+
+ // If the user requested a flag that requires source locations available in
+ // the backend, make sure that the backend tracks source location information.
if (NeedLocTracking && Opts.getDebugInfo() == CodeGenOptions::NoDebugInfo)
Opts.setDebugInfo(CodeGenOptions::LocTrackingOnly);
+ Opts.RewriteMapFiles = Args.getAllArgValues(OPT_frewrite_map_file);
+
+ // Parse -fsanitize-recover= arguments.
+ // FIXME: Report unrecoverable sanitizers incorrectly specified here.
+ parseSanitizerKinds("-fsanitize-recover=",
+ Args.getAllArgValues(OPT_fsanitize_recover_EQ), Diags,
+ Opts.SanitizeRecover);
+
return Success;
}
@@ -597,8 +642,9 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
bool Success = true;
Opts.DiagnosticLogFile = Args.getLastArgValue(OPT_diagnostic_log_file);
- Opts.DiagnosticSerializationFile =
- Args.getLastArgValue(OPT_diagnostic_serialized_file);
+ if (Arg *A =
+ Args.getLastArg(OPT_diagnostic_serialized_file, OPT__serialize_diags))
+ Opts.DiagnosticSerializationFile = A->getValue();
Opts.IgnoreWarnings = Args.hasArg(OPT_w);
Opts.NoRewriteMacros = Args.hasArg(OPT_Wno_rewrite_macros);
Opts.Pedantic = Args.hasArg(OPT_pedantic);
@@ -686,6 +732,9 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Opts.ConstexprBacktraceLimit = getLastArgIntValue(
Args, OPT_fconstexpr_backtrace_limit,
DiagnosticOptions::DefaultConstexprBacktraceLimit, Diags);
+ Opts.SpellCheckingLimit = getLastArgIntValue(
+ Args, OPT_fspell_checking_limit,
+ DiagnosticOptions::DefaultSpellCheckingLimit, Diags);
Opts.TabStop = getLastArgIntValue(Args, OPT_ftabstop,
DiagnosticOptions::DefaultTabStop, Diags);
if (Opts.TabStop == 0 || Opts.TabStop > DiagnosticOptions::MaxTabStop) {
@@ -716,6 +765,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
case OPT_ast_list:
Opts.ProgramAction = frontend::ASTDeclList; break;
case OPT_ast_dump:
+ case OPT_ast_dump_lookups:
Opts.ProgramAction = frontend::ASTDump; break;
case OPT_ast_print:
Opts.ProgramAction = frontend::ASTPrint; break;
@@ -823,11 +873,14 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.FixOnlyWarnings = Args.hasArg(OPT_fix_only_warnings);
Opts.FixAndRecompile = Args.hasArg(OPT_fixit_recompile);
Opts.FixToTemporaries = Args.hasArg(OPT_fixit_to_temp);
+ Opts.ASTDumpDecls = Args.hasArg(OPT_ast_dump);
Opts.ASTDumpFilter = Args.getLastArgValue(OPT_ast_dump_filter);
Opts.ASTDumpLookups = Args.hasArg(OPT_ast_dump_lookups);
Opts.UseGlobalModuleIndex = !Args.hasArg(OPT_fno_modules_global_index);
Opts.GenerateGlobalModuleIndex = Opts.UseGlobalModuleIndex;
-
+ Opts.ModuleMapFiles = Args.getAllArgValues(OPT_fmodule_map_file);
+ Opts.ModuleFiles = Args.getAllArgValues(OPT_fmodule_file);
+
Opts.CodeCompleteOpts.IncludeMacros
= Args.hasArg(OPT_code_completion_macros);
Opts.CodeCompleteOpts.IncludeCodePatterns
@@ -866,6 +919,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Literals;
if (Args.hasArg(OPT_objcmt_migrate_subscripting))
Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Subscripting;
+ if (Args.hasArg(OPT_objcmt_migrate_property_dot_syntax))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_PropertyDotSyntax;
if (Args.hasArg(OPT_objcmt_migrate_property))
Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Property;
if (Args.hasArg(OPT_objcmt_migrate_readonly_property))
@@ -950,14 +1005,19 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
void *MainAddr) {
- SmallString<128> P(llvm::sys::fs::getMainExecutable(Argv0, MainAddr));
-
- if (!P.empty()) {
- llvm::sys::path::remove_filename(P); // Remove /clang from foo/bin/clang
- llvm::sys::path::remove_filename(P); // Remove /bin from foo/bin
-
- // Get foo/lib/clang/<version>/include
- llvm::sys::path::append(P, "lib", "clang", CLANG_VERSION_STRING);
+ std::string ClangExecutable =
+ llvm::sys::fs::getMainExecutable(Argv0, MainAddr);
+ StringRef Dir = llvm::sys::path::parent_path(ClangExecutable);
+
+ // Compute the path to the resource directory.
+ StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
+ SmallString<128> P(Dir);
+ if (ClangResourceDir != "") {
+ llvm::sys::path::append(P, ClangResourceDir);
+ } else {
+ StringRef ClangLibdirSuffix(CLANG_LIBDIR_SUFFIX);
+ llvm::sys::path::append(P, "..", Twine("lib") + ClangLibdirSuffix, "clang",
+ CLANG_VERSION_STRING);
}
return P.str();
@@ -978,6 +1038,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
Opts.DisableModuleHash = Args.hasArg(OPT_fdisable_module_hash);
// -fmodules implies -fmodule-maps
Opts.ModuleMaps = Args.hasArg(OPT_fmodule_maps) || Args.hasArg(OPT_fmodules);
+ Opts.ModuleMapFileHomeIsCwd = Args.hasArg(OPT_fmodule_map_file_home_is_cwd);
Opts.ModuleCachePruneInterval =
getLastArgIntValue(Args, OPT_fmodules_prune_interval, 7 * 24 * 60 * 60);
Opts.ModuleCachePruneAfter =
@@ -995,9 +1056,6 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
StringRef MacroDef = (*it)->getValue();
Opts.ModulesIgnoreMacros.insert(MacroDef.split('=').first);
}
- std::vector<std::string> ModuleMapFiles =
- Args.getAllArgValues(OPT_fmodule_map_file);
- Opts.ModuleMapFiles.insert(ModuleMapFiles.begin(), ModuleMapFiles.end());
// Add -I..., -F..., and -index-header-map options in order.
bool IsIndexHeaderMap = false;
@@ -1118,7 +1176,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
case IK_PreprocessedC:
case IK_ObjC:
case IK_PreprocessedObjC:
- LangStd = LangStandard::lang_gnu99;
+ LangStd = LangStandard::lang_gnu11;
break;
case IK_CXX:
case IK_PreprocessedCXX:
@@ -1135,7 +1193,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.C11 = Std.isC11();
Opts.CPlusPlus = Std.isCPlusPlus();
Opts.CPlusPlus11 = Std.isCPlusPlus11();
- Opts.CPlusPlus1y = Std.isCPlusPlus1y();
+ Opts.CPlusPlus14 = Std.isCPlusPlus14();
Opts.CPlusPlus1z = Std.isCPlusPlus1z();
Opts.Digraphs = Std.hasDigraphs();
Opts.GNUMode = Std.isGNUMode();
@@ -1148,10 +1206,12 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
if (LangStd == LangStandard::lang_opencl)
Opts.OpenCLVersion = 100;
else if (LangStd == LangStandard::lang_opencl11)
- Opts.OpenCLVersion = 110;
+ Opts.OpenCLVersion = 110;
else if (LangStd == LangStandard::lang_opencl12)
Opts.OpenCLVersion = 120;
-
+ else if (LangStd == LangStandard::lang_opencl20)
+ Opts.OpenCLVersion = 200;
+
// OpenCL has some additional defaults.
if (Opts.OpenCL) {
Opts.AltiVec = 0;
@@ -1175,15 +1235,10 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.GNUKeywords = Opts.GNUMode;
Opts.CXXOperatorNames = Opts.CPlusPlus;
- // Mimicing gcc's behavior, trigraphs are only enabled if -trigraphs
- // is specified, or -std is set to a conforming mode.
- // Trigraphs are disabled by default in c++1z onwards.
- Opts.Trigraphs = !Opts.GNUMode && !Opts.CPlusPlus1z;
-
Opts.DollarIdents = !Opts.AsmPreprocessor;
- // C++1y onwards has sized global deallocation functions.
- Opts.SizedDeallocation = Opts.CPlusPlus1y;
+ // C++14 onwards has sized global deallocation functions.
+ Opts.SizedDeallocation = Opts.CPlusPlus14;
}
/// Attempt to parse a visibility value out of the given argument.
@@ -1299,6 +1354,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
.Case("CL", LangStandard::lang_opencl)
.Case("CL1.1", LangStandard::lang_opencl11)
.Case("CL1.2", LangStandard::lang_opencl12)
+ .Case("CL2.0", LangStandard::lang_opencl20)
.Default(LangStandard::lang_unspecified);
if (OpenCLLangStd == LangStandard::lang_unspecified) {
@@ -1322,6 +1378,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_fno_operator_names))
Opts.CXXOperatorNames = 0;
+ if (Args.hasArg(OPT_fcuda_is_device))
+ Opts.CUDAIsDevice = 1;
+
if (Opts.ObjC1) {
if (Arg *arg = Args.getLastArg(OPT_fobjc_runtime_EQ)) {
StringRef value = arg->getValue();
@@ -1400,17 +1459,22 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
else if (Args.hasArg(OPT_fwrapv))
Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined);
- if (Args.hasArg(OPT_trigraphs))
- Opts.Trigraphs = 1;
+ Opts.MSVCCompat = Args.hasArg(OPT_fms_compatibility);
+ Opts.MicrosoftExt = Opts.MSVCCompat || Args.hasArg(OPT_fms_extensions);
+ Opts.AsmBlocks = Args.hasArg(OPT_fasm_blocks) || Opts.MicrosoftExt;
+ Opts.MSCompatibilityVersion = parseMSCVersion(Args, Diags);
+
+  // Mimicking gcc's behavior, trigraphs are only enabled if -trigraphs
+ // is specified, or -std is set to a conforming mode.
+ // Trigraphs are disabled by default in c++1z onwards.
+ Opts.Trigraphs = !Opts.GNUMode && !Opts.MSVCCompat && !Opts.CPlusPlus1z;
+ Opts.Trigraphs =
+ Args.hasFlag(OPT_ftrigraphs, OPT_fno_trigraphs, Opts.Trigraphs);
Opts.DollarIdents = Args.hasFlag(OPT_fdollars_in_identifiers,
OPT_fno_dollars_in_identifiers,
Opts.DollarIdents);
Opts.PascalStrings = Args.hasArg(OPT_fpascal_strings);
- Opts.MSVCCompat = Args.hasArg(OPT_fms_compatibility);
- Opts.MicrosoftExt = Opts.MSVCCompat || Args.hasArg(OPT_fms_extensions);
- Opts.AsmBlocks = Args.hasArg(OPT_fasm_blocks) || Opts.MicrosoftExt;
- Opts.MSCompatibilityVersion = parseMSCVersion(Args, Diags);
Opts.VtorDispMode = getLastArgIntValue(Args, OPT_vtordisp_mode_EQ, 1, Diags);
Opts.Borland = Args.hasArg(OPT_fborland_extensions);
Opts.WritableStrings = Args.hasArg(OPT_fwritable_strings);
@@ -1438,6 +1502,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
!Args.hasArg(OPT_fno_modules_search_all) &&
Args.hasArg(OPT_fmodules_search_all);
Opts.ModulesErrorRecovery = !Args.hasArg(OPT_fno_modules_error_recovery);
+ Opts.ModulesImplicitMaps = Args.hasFlag(OPT_fmodules_implicit_maps,
+ OPT_fno_modules_implicit_maps, true);
Opts.CharIsSigned = Opts.OpenCL || !Args.hasArg(OPT_fno_signed_char);
Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar);
Opts.ShortWChar = Args.hasFlag(OPT_fshort_wchar, OPT_fno_short_wchar, false);
@@ -1472,6 +1538,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_fencode_extended_block_signature);
Opts.EmitAllDecls = Args.hasArg(OPT_femit_all_decls);
Opts.PackStruct = getLastArgIntValue(Args, OPT_fpack_struct_EQ, 0, Diags);
+ Opts.MaxTypeAlign = getLastArgIntValue(Args, OPT_fmax_type_align_EQ, 0, Diags);
Opts.PICLevel = getLastArgIntValue(Args, OPT_pic_level, 0, Diags);
Opts.PIELevel = getLastArgIntValue(Args, OPT_pie_level, 0, Diags);
Opts.Static = Args.hasArg(OPT_static_define);
@@ -1492,6 +1559,16 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.DebuggerObjCLiteral = Args.hasArg(OPT_fdebugger_objc_literal);
Opts.ApplePragmaPack = Args.hasArg(OPT_fapple_pragma_pack);
Opts.CurrentModule = Args.getLastArgValue(OPT_fmodule_name);
+ Opts.ImplementationOfModule =
+ Args.getLastArgValue(OPT_fmodule_implementation_of);
+ Opts.NativeHalfType = Opts.NativeHalfType;
+ Opts.HalfArgsAndReturns = Args.hasArg(OPT_fallow_half_arguments_and_returns);
+
+ if (!Opts.CurrentModule.empty() && !Opts.ImplementationOfModule.empty() &&
+ Opts.CurrentModule != Opts.ImplementationOfModule) {
+ Diags.Report(diag::err_conflicting_module_names)
+ << Opts.CurrentModule << Opts.ImplementationOfModule;
+ }
if (Arg *A = Args.getLastArg(OPT_faddress_space_map_mangling_EQ)) {
switch (llvm::StringSwitch<unsigned>(A->getValue())
@@ -1556,8 +1633,11 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// inlining enabled.
Opts.NoInlineDefine = !Opt || Args.hasArg(OPT_fno_inline);
- Opts.FastMath = Args.hasArg(OPT_ffast_math);
- Opts.FiniteMathOnly = Args.hasArg(OPT_ffinite_math_only);
+ Opts.FastMath = Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.FiniteMathOnly = Args.hasArg(OPT_ffinite_math_only) ||
+ Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.RetainCommentsFromSystemHeaders =
Args.hasArg(OPT_fretain_comments_from_system_headers);
@@ -1575,35 +1655,12 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
// Parse -fsanitize= arguments.
- std::vector<std::string> Sanitizers = Args.getAllArgValues(OPT_fsanitize_EQ);
- for (unsigned I = 0, N = Sanitizers.size(); I != N; ++I) {
- // Since the Opts.Sanitize* values are bitfields, it's a little tricky to
- // efficiently map string values to them. Perform the mapping indirectly:
- // convert strings to enumerated values, then switch over the enum to set
- // the right bitfield value.
- enum Sanitizer {
-#define SANITIZER(NAME, ID) \
- ID,
-#include "clang/Basic/Sanitizers.def"
- Unknown
- };
- switch (llvm::StringSwitch<unsigned>(Sanitizers[I])
-#define SANITIZER(NAME, ID) \
- .Case(NAME, ID)
-#include "clang/Basic/Sanitizers.def"
- .Default(Unknown)) {
-#define SANITIZER(NAME, ID) \
- case ID: \
- Opts.Sanitize.ID = true; \
- break;
-#include "clang/Basic/Sanitizers.def"
-
- case Unknown:
- Diags.Report(diag::err_drv_invalid_value)
- << "-fsanitize=" << Sanitizers[I];
- break;
- }
- }
+ parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
+ Diags, Opts.Sanitize);
+ // -fsanitize-address-field-padding=N has to be a LangOpt, parse it here.
+ Opts.SanitizeAddressFieldPadding =
+ getLastArgIntValue(Args, OPT_fsanitize_address_field_padding, 0, Diags);
+ Opts.SanitizerBlacklistFile = Args.getLastArgValue(OPT_fsanitize_blacklist);
}
static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
@@ -2021,7 +2078,7 @@ createVFSFromCompilerInvocation(const CompilerInvocation &CI,
}
IntrusiveRefCntPtr<vfs::FileSystem> FS =
- vfs::getVFSFromYAML(Buffer->release(), /*DiagHandler*/ nullptr);
+ vfs::getVFSFromYAML(std::move(Buffer.get()), /*DiagHandler*/ nullptr);
if (!FS.get()) {
Diags.Report(diag::err_invalid_vfs_overlay) << File;
return IntrusiveRefCntPtr<vfs::FileSystem>();
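parseSanitizerKinds above maps each -fsanitize= value onto SanitizerKind through llvm::StringSwitch and the Sanitizers.def x-macro. A cut-down sketch of the same lookup pattern, using a hypothetical local enum instead of the real .def file:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// Illustrative stand-in for SanitizerKind; the real code expands
// clang/Basic/Sanitizers.def to generate one .Case per sanitizer.
enum class Kind { Address, Thread, Undefined, Unknown };

static Kind parseKind(llvm::StringRef Name) {
  return llvm::StringSwitch<Kind>(Name)
      .Case("address", Kind::Address)
      .Case("thread", Kind::Thread)
      .Case("undefined", Kind::Undefined)
      .Default(Kind::Unknown);
}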
diff --git a/lib/Frontend/CreateInvocationFromCommandLine.cpp b/lib/Frontend/CreateInvocationFromCommandLine.cpp
index f2f36e4cacb2..4a8a8a029e79 100644
--- a/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -71,13 +71,13 @@ clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
return nullptr;
}
- const driver::Command *Cmd = cast<driver::Command>(*Jobs.begin());
- if (StringRef(Cmd->getCreator().getName()) != "clang") {
+ const driver::Command &Cmd = cast<driver::Command>(*Jobs.begin());
+ if (StringRef(Cmd.getCreator().getName()) != "clang") {
Diags->Report(diag::err_fe_expected_clang_command);
return nullptr;
}
- const ArgStringList &CCArgs = Cmd->getArguments();
+ const ArgStringList &CCArgs = Cmd.getArguments();
std::unique_ptr<CompilerInvocation> CI(new CompilerInvocation());
if (!CompilerInvocation::CreateFromArgs(*CI,
const_cast<const char **>(CCArgs.data()),
diff --git a/lib/Frontend/DependencyFile.cpp b/lib/Frontend/DependencyFile.cpp
index 0b9c0d47dc36..6ea8f5193ef7 100644
--- a/lib/Frontend/DependencyFile.cpp
+++ b/lib/Frontend/DependencyFile.cpp
@@ -22,6 +22,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Serialization/ASTReader.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
@@ -108,23 +109,32 @@ struct DepCollectorASTListener : public ASTReaderListener {
void DependencyCollector::maybeAddDependency(StringRef Filename, bool FromModule,
bool IsSystem, bool IsModuleFile,
bool IsMissing) {
- if (Seen.insert(Filename) &&
+ if (Seen.insert(Filename).second &&
sawDependency(Filename, FromModule, IsSystem, IsModuleFile, IsMissing))
Dependencies.push_back(Filename);
}
+static bool isSpecialFilename(StringRef Filename) {
+ return llvm::StringSwitch<bool>(Filename)
+ .Case("<built-in>", true)
+ .Case("<stdin>", true)
+ .Default(false);
+}
+
bool DependencyCollector::sawDependency(StringRef Filename, bool FromModule,
bool IsSystem, bool IsModuleFile,
bool IsMissing) {
- return Filename != "<built-in>" && (needSystemDependencies() || !IsSystem);
+ return !isSpecialFilename(Filename) &&
+ (needSystemDependencies() || !IsSystem);
}
DependencyCollector::~DependencyCollector() { }
void DependencyCollector::attachToPreprocessor(Preprocessor &PP) {
- PP.addPPCallbacks(new DepCollectorPPCallbacks(*this, PP.getSourceManager()));
+ PP.addPPCallbacks(
+ llvm::make_unique<DepCollectorPPCallbacks>(*this, PP.getSourceManager()));
}
void DependencyCollector::attachToASTReader(ASTReader &R) {
- R.addListener(new DepCollectorASTListener(*this));
+ R.addListener(llvm::make_unique<DepCollectorASTListener>(*this));
}
namespace {
@@ -203,21 +213,21 @@ DependencyFileGenerator *DependencyFileGenerator::CreateAndAttachToPreprocessor(
PP.SetSuppressIncludeNotFoundError(true);
DFGImpl *Callback = new DFGImpl(&PP, Opts);
- PP.addPPCallbacks(Callback); // PP owns the Callback
+ PP.addPPCallbacks(std::unique_ptr<PPCallbacks>(Callback));
return new DependencyFileGenerator(Callback);
}
void DependencyFileGenerator::AttachToASTReader(ASTReader &R) {
DFGImpl *I = reinterpret_cast<DFGImpl *>(Impl);
assert(I && "missing implementation");
- R.addListener(new DFGASTReaderListener(*I));
+ R.addListener(llvm::make_unique<DFGASTReaderListener>(*I));
}
/// FileMatchesDepCriteria - Determine whether the given Filename should be
/// considered as a dependency.
bool DFGImpl::FileMatchesDepCriteria(const char *Filename,
SrcMgr::CharacteristicKind FileType) {
- if (strcmp("<built-in>", Filename) == 0)
+ if (isSpecialFilename(Filename))
return false;
if (IncludeSystemHeaders)
@@ -275,7 +285,7 @@ void DFGImpl::InclusionDirective(SourceLocation HashLoc,
}
void DFGImpl::AddFilename(StringRef Filename) {
- if (FilesSet.insert(Filename))
+ if (FilesSet.insert(Filename).second)
Files.push_back(Filename);
}
@@ -297,11 +307,11 @@ void DFGImpl::OutputDependencyFile() {
return;
}
- std::string Err;
- llvm::raw_fd_ostream OS(OutputFile.c_str(), Err, llvm::sys::fs::F_Text);
- if (!Err.empty()) {
- PP->getDiagnostics().Report(diag::err_fe_error_opening)
- << OutputFile << Err;
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::F_Text);
+ if (EC) {
+ PP->getDiagnostics().Report(diag::err_fe_error_opening) << OutputFile
+ << EC.message();
return;
}
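Several hunks in this file switch set-membership checks from a boolean insert() to the (iterator, bool) pair interface and test .second. A small sketch of the idiom; recordOnce is an illustrative helper, not clang code:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"

// True only the first time Name is added: insert() returns a pair whose
// .second member says whether an insertion actually took place.
static bool recordOnce(llvm::StringSet<> &Seen, llvm::StringRef Name) {
  return Seen.insert(Name).second;
}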
diff --git a/lib/Frontend/DependencyGraph.cpp b/lib/Frontend/DependencyGraph.cpp
index 051b7f9e1412..67a977e38be2 100644
--- a/lib/Frontend/DependencyGraph.cpp
+++ b/lib/Frontend/DependencyGraph.cpp
@@ -61,7 +61,8 @@ public:
void clang::AttachDependencyGraphGen(Preprocessor &PP, StringRef OutputFile,
StringRef SysRoot) {
- PP.addPPCallbacks(new DependencyGraphCallback(&PP, OutputFile, SysRoot));
+ PP.addPPCallbacks(llvm::make_unique<DependencyGraphCallback>(&PP, OutputFile,
+ SysRoot));
}
void DependencyGraphCallback::InclusionDirective(SourceLocation HashLoc,
@@ -96,11 +97,11 @@ DependencyGraphCallback::writeNodeReference(raw_ostream &OS,
}
void DependencyGraphCallback::OutputGraphFile() {
- std::string Err;
- llvm::raw_fd_ostream OS(OutputFile.c_str(), Err, llvm::sys::fs::F_Text);
- if (!Err.empty()) {
- PP->getDiagnostics().Report(diag::err_fe_error_opening)
- << OutputFile << Err;
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::F_Text);
+ if (EC) {
+ PP->getDiagnostics().Report(diag::err_fe_error_opening) << OutputFile
+ << EC.message();
return;
}
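AttachDependencyGraphGen now hands its callback to the preprocessor as a std::unique_ptr built with llvm::make_unique, so the Preprocessor owns it outright. A minimal sketch of registering a custom callback under that ownership model; LoggingCallbacks is a hypothetical no-op subclass:

#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/STLExtras.h"

namespace {
// Hypothetical callback; a real one would override InclusionDirective etc.
class LoggingCallbacks : public clang::PPCallbacks {};
} // namespace

static void attachLogging(clang::Preprocessor &PP) {
  // The Preprocessor takes ownership of the callback object.
  PP.addPPCallbacks(llvm::make_unique<LoggingCallbacks>());
}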
diff --git a/lib/Frontend/DiagnosticRenderer.cpp b/lib/Frontend/DiagnosticRenderer.cpp
index cff32b852bf4..c63e98dbe4f1 100644
--- a/lib/Frontend/DiagnosticRenderer.cpp
+++ b/lib/Frontend/DiagnosticRenderer.cpp
@@ -193,7 +193,7 @@ void DiagnosticRenderer::emitStoredDiagnostic(StoredDiagnostic &Diag) {
void DiagnosticRenderer::emitBasicNote(StringRef Message) {
emitDiagnosticMessage(
SourceLocation(), PresumedLoc(), DiagnosticsEngine::Note, Message,
- ArrayRef<CharSourceRange>(), nullptr, DiagOrStoredDiag());
+ None, nullptr, DiagOrStoredDiag());
}
/// \brief Prints an include stack when appropriate for a particular
@@ -509,6 +509,6 @@ DiagnosticNoteRenderer::emitBuildingModuleLocation(SourceLocation Loc,
Message << "while building module '" << ModuleName << "' imported from "
<< PLoc.getFilename() << ':' << PLoc.getLine() << ":";
else
- Message << "while building module '" << ModuleName << ":";
+ Message << "while building module '" << ModuleName << "':";
emitNote(Loc, Message.str(), &SM);
}
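The emitBasicNote change passes llvm::None where an explicitly constructed empty ArrayRef<CharSourceRange> used to be written out. A tiny sketch of why that compiles; sum and emptySum are hypothetical helpers:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"

static int sum(llvm::ArrayRef<int> Values) {
  int Total = 0;
  for (int V : Values)
    Total += V;
  return Total;
}

// llvm::None converts implicitly to an empty ArrayRef of any element type.
static int emptySum() { return sum(llvm::None); }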
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index 791017924d69..c81c81aba4d2 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -129,14 +129,15 @@ FrontendAction::FrontendAction() : Instance(nullptr) {}
FrontendAction::~FrontendAction() {}
void FrontendAction::setCurrentInput(const FrontendInputFile &CurrentInput,
- ASTUnit *AST) {
+ std::unique_ptr<ASTUnit> AST) {
this->CurrentInput = CurrentInput;
- CurrentASTUnit.reset(AST);
+ CurrentASTUnit = std::move(AST);
}
-ASTConsumer* FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
- ASTConsumer* Consumer = CreateASTConsumer(CI, InFile);
+std::unique_ptr<ASTConsumer>
+FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ std::unique_ptr<ASTConsumer> Consumer = CreateASTConsumer(CI, InFile);
if (!Consumer)
return nullptr;
@@ -145,7 +146,8 @@ ASTConsumer* FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
// Make sure the non-plugin consumer is first, so that plugins can't
// modify the AST.
- std::vector<ASTConsumer*> Consumers(1, Consumer);
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+ Consumers.push_back(std::move(Consumer));
for (size_t i = 0, e = CI.getFrontendOpts().AddPluginActions.size();
i != e; ++i) {
@@ -155,16 +157,15 @@ ASTConsumer* FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
it = FrontendPluginRegistry::begin(),
ie = FrontendPluginRegistry::end();
it != ie; ++it) {
- if (it->getName() == CI.getFrontendOpts().AddPluginActions[i]) {
- std::unique_ptr<PluginASTAction> P(it->instantiate());
- FrontendAction* c = P.get();
- if (P->ParseArgs(CI, CI.getFrontendOpts().AddPluginArgs[i]))
- Consumers.push_back(c->CreateASTConsumer(CI, InFile));
- }
+ if (it->getName() != CI.getFrontendOpts().AddPluginActions[i])
+ continue;
+ std::unique_ptr<PluginASTAction> P = it->instantiate();
+ if (P->ParseArgs(CI, CI.getFrontendOpts().AddPluginArgs[i]))
+ Consumers.push_back(P->CreateASTConsumer(CI, InFile));
}
}
- return new MultiplexConsumer(Consumers);
+ return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
}
bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
@@ -189,13 +190,12 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
- ASTUnit *AST = ASTUnit::LoadFromASTFile(InputFile, Diags,
- CI.getFileSystemOpts());
+ std::unique_ptr<ASTUnit> AST =
+ ASTUnit::LoadFromASTFile(InputFile, Diags, CI.getFileSystemOpts());
+
if (!AST)
goto failure;
- setCurrentInput(Input, AST);
-
// Inform the diagnostic client we are processing a source file.
CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), nullptr);
HasBegunSourceFile = true;
@@ -207,6 +207,8 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.setPreprocessor(&AST->getPreprocessor());
CI.setASTContext(&AST->getASTContext());
+ setCurrentInput(Input, std::move(AST));
+
// Initialize the action.
if (!BeginSourceFileAction(CI, InputFile))
goto failure;
@@ -285,8 +287,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
}
}
- // Set up the preprocessor.
- CI.createPreprocessor(getTranslationUnitKind());
+ // Set up the preprocessor if needed. When parsing model files the
+ // preprocessor of the original source is reused.
+ if (!isModelParsingAction())
+ CI.createPreprocessor(getTranslationUnitKind());
// Inform the diagnostic client we are processing a source file.
CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(),
@@ -305,15 +309,19 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// Create the AST context and consumer unless this is a preprocessor only
// action.
if (!usesPreprocessorOnly()) {
- CI.createASTContext();
+ // Parsing a model file should reuse the existing ASTContext.
+ if (!isModelParsingAction())
+ CI.createASTContext();
- std::unique_ptr<ASTConsumer> Consumer(
- CreateWrappedASTConsumer(CI, InputFile));
+ std::unique_ptr<ASTConsumer> Consumer =
+ CreateWrappedASTConsumer(CI, InputFile);
if (!Consumer)
goto failure;
- CI.getASTContext().setASTMutationListener(Consumer->GetASTMutationListener());
-
+ // FIXME: should not overwrite ASTMutationListener when parsing model files?
+ if (!isModelParsingAction())
+ CI.getASTContext().setASTMutationListener(Consumer->GetASTMutationListener());
+
if (!CI.getPreprocessorOpts().ChainedIncludes.empty()) {
// Convert headers to PCH and chain them.
IntrusiveRefCntPtr<ExternalSemaSource> source, FinalReader;
@@ -349,7 +357,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
goto failure;
}
- CI.setASTConsumer(Consumer.release());
+ CI.setASTConsumer(std::move(Consumer));
if (!CI.hasASTConsumer())
goto failure;
}
@@ -375,6 +383,11 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
"doesn't support modules");
}
+ // If we were asked to load any module files, do so now.
+ for (const auto &ModuleFile : CI.getFrontendOpts().ModuleFiles)
+ if (!CI.loadModuleFile(ModuleFile))
+ goto failure;
+
// If there is a layout overrides file, attach an external AST source that
// provides the layouts from that file.
if (!CI.getFrontendOpts().OverrideRecordLayoutsFile.empty() &&
@@ -432,6 +445,10 @@ void FrontendAction::EndSourceFile() {
// Inform the diagnostic client we are done with this source file.
CI.getDiagnosticClient().EndSourceFile();
+ // Inform the preprocessor we are done.
+ if (CI.hasPreprocessor())
+ CI.getPreprocessor().EndSourceFile();
+
// Finalize the action.
EndSourceFileAction();
@@ -444,7 +461,7 @@ void FrontendAction::EndSourceFile() {
CI.resetAndLeakSema();
CI.resetAndLeakASTContext();
}
- BuryPointer(CI.takeASTConsumer());
+ BuryPointer(CI.takeASTConsumer().get());
} else {
if (!isCurrentFileAST()) {
CI.setSema(nullptr);
@@ -453,10 +470,6 @@ void FrontendAction::EndSourceFile() {
CI.setASTConsumer(nullptr);
}
- // Inform the preprocessor we are done.
- if (CI.hasPreprocessor())
- CI.getPreprocessor().EndSourceFile();
-
if (CI.getFrontendOpts().ShowStats) {
llvm::errs() << "\nSTATISTICS FOR '" << getCurrentFile() << "':\n";
CI.getPreprocessor().PrintStats();
@@ -516,14 +529,15 @@ void ASTFrontendAction::ExecuteAction() {
void PluginASTAction::anchor() { }
-ASTConsumer *
+std::unique_ptr<ASTConsumer>
PreprocessorFrontendAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
llvm_unreachable("Invalid CreateASTConsumer on preprocessor action!");
}
-ASTConsumer *WrapperFrontendAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+WrapperFrontendAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
return WrappedAction->CreateASTConsumer(CI, InFile);
}
bool WrapperFrontendAction::BeginInvocation(CompilerInstance &CI) {
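With these signature changes, a frontend action returns its consumer as std::unique_ptr<ASTConsumer> and the CompilerInstance takes ownership. A minimal sketch of a client action written against the new interface; NoopAction is illustrative only:

#include "clang/AST/ASTConsumer.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendAction.h"
#include "llvm/ADT/STLExtras.h"

// Parses the input but consumes nothing; ownership of the consumer is
// handed back through the unique_ptr return value.
class NoopAction : public clang::ASTFrontendAction {
protected:
  std::unique_ptr<clang::ASTConsumer>
  CreateASTConsumer(clang::CompilerInstance &CI, llvm::StringRef InFile) override {
    return llvm::make_unique<clang::ASTConsumer>();
  }
};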
diff --git a/lib/Frontend/FrontendActions.cpp b/lib/Frontend/FrontendActions.cpp
index 6dcaf382c3c4..701ef026d49c 100644
--- a/lib/Frontend/FrontendActions.cpp
+++ b/lib/Frontend/FrontendActions.cpp
@@ -33,9 +33,9 @@ using namespace clang;
// Custom Actions
//===----------------------------------------------------------------------===//
-ASTConsumer *InitOnlyAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
- return new ASTConsumer();
+std::unique_ptr<ASTConsumer>
+InitOnlyAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return llvm::make_unique<ASTConsumer>();
}
void InitOnlyAction::ExecuteAction() {
@@ -45,36 +45,38 @@ void InitOnlyAction::ExecuteAction() {
// AST Consumer Actions
//===----------------------------------------------------------------------===//
-ASTConsumer *ASTPrintAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+ASTPrintAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile))
return CreateASTPrinter(OS, CI.getFrontendOpts().ASTDumpFilter);
return nullptr;
}
-ASTConsumer *ASTDumpAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+ASTDumpAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
return CreateASTDumper(CI.getFrontendOpts().ASTDumpFilter,
+ CI.getFrontendOpts().ASTDumpDecls,
CI.getFrontendOpts().ASTDumpLookups);
}
-ASTConsumer *ASTDeclListAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+ASTDeclListAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
return CreateASTDeclNodeLister();
}
-ASTConsumer *ASTViewAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+ASTViewAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
return CreateASTViewer();
}
-ASTConsumer *DeclContextPrintAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+DeclContextPrintAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
return CreateDeclContextPrinter();
}
-ASTConsumer *GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
std::string Sysroot;
std::string OutputFile;
raw_ostream *OS = nullptr;
@@ -83,8 +85,8 @@ ASTConsumer *GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI,
if (!CI.getFrontendOpts().RelocatablePCH)
Sysroot.clear();
- return new PCHGenerator(CI.getPreprocessor(), OutputFile, nullptr, Sysroot,
- OS);
+ return llvm::make_unique<PCHGenerator>(CI.getPreprocessor(), OutputFile,
+ nullptr, Sysroot, OS);
}
bool GeneratePCHAction::ComputeASTConsumerArguments(CompilerInstance &CI,
@@ -111,16 +113,17 @@ bool GeneratePCHAction::ComputeASTConsumerArguments(CompilerInstance &CI,
return false;
}
-ASTConsumer *GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
std::string Sysroot;
std::string OutputFile;
raw_ostream *OS = nullptr;
if (ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile, OS))
return nullptr;
- return new PCHGenerator(CI.getPreprocessor(), OutputFile, Module,
- Sysroot, OS);
+ return llvm::make_unique<PCHGenerator>(CI.getPreprocessor(), OutputFile,
+ Module, Sysroot, OS);
}
static SmallVectorImpl<char> &
@@ -139,17 +142,9 @@ static std::error_code addHeaderInclude(StringRef HeaderName,
Includes += "#import \"";
else
Includes += "#include \"";
- // Use an absolute path for the include; there's no reason to think that
- // a relative path will work (. might not be on our include path) or that
- // it will find the same file.
- if (llvm::sys::path::is_absolute(HeaderName)) {
- Includes += HeaderName;
- } else {
- SmallString<256> Header = HeaderName;
- if (std::error_code Err = llvm::sys::fs::make_absolute(Header))
- return Err;
- Includes += Header;
- }
+
+ Includes += HeaderName;
+
Includes += "\"\n";
if (IsExternC && LangOpts.CPlusPlus)
Includes += "}\n";
@@ -160,7 +155,16 @@ static std::error_code addHeaderInclude(const FileEntry *Header,
SmallVectorImpl<char> &Includes,
const LangOptions &LangOpts,
bool IsExternC) {
- return addHeaderInclude(Header->getName(), Includes, LangOpts, IsExternC);
+ // Use an absolute path if we don't have a filename as written in the module
+ // map file; this ensures that we will identify the right file independent of
+ // header search paths.
+ if (llvm::sys::path::is_absolute(Header->getName()))
+ return addHeaderInclude(Header->getName(), Includes, LangOpts, IsExternC);
+
+ SmallString<256> AbsName(Header->getName());
+ if (std::error_code Err = llvm::sys::fs::make_absolute(AbsName))
+ return Err;
+ return addHeaderInclude(AbsName, Includes, LangOpts, IsExternC);
}
/// \brief Collect the set of header includes needed to construct the given
@@ -179,16 +183,20 @@ collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr,
return std::error_code();
// Add includes for each of these headers.
- for (unsigned I = 0, N = Module->NormalHeaders.size(); I != N; ++I) {
- const FileEntry *Header = Module->NormalHeaders[I];
- Module->addTopHeader(Header);
- if (std::error_code Err =
- addHeaderInclude(Header, Includes, LangOpts, Module->IsExternC))
+ for (Module::Header &H : Module->Headers[Module::HK_Normal]) {
+ Module->addTopHeader(H.Entry);
+ // Use the path as specified in the module map file. We'll look for this
+ // file relative to the module build directory (the directory containing
+ // the module map file) so this will find the same file that we found
+ // while parsing the module map.
+ if (std::error_code Err = addHeaderInclude(H.NameAsWritten, Includes,
+ LangOpts, Module->IsExternC))
return Err;
}
// Note that Module->PrivateHeaders will not be a TopHeader.
if (const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader()) {
+ // FIXME: Track the name as written here.
Module->addTopHeader(UmbrellaHeader);
if (Module->Parent) {
// Include the umbrella header for submodules.
@@ -210,25 +218,30 @@ collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr,
.Cases(".h", ".H", ".hh", ".hpp", true)
.Default(false))
continue;
-
+
+ const FileEntry *Header = FileMgr.getFile(Dir->path());
+ // FIXME: This shouldn't happen unless there is a file system race. Is
+ // that worth diagnosing?
+ if (!Header)
+ continue;
+
// If this header is marked 'unavailable' in this module, don't include
// it.
- if (const FileEntry *Header = FileMgr.getFile(Dir->path())) {
- if (ModMap.isHeaderUnavailableInModule(Header, Module))
- continue;
- Module->addTopHeader(Header);
- }
-
+ if (ModMap.isHeaderUnavailableInModule(Header, Module))
+ continue;
+
// Include this header as part of the umbrella directory.
- if (std::error_code Err = addHeaderInclude(Dir->path(), Includes,
- LangOpts, Module->IsExternC))
+ // FIXME: Track the name as written through to here.
+ Module->addTopHeader(Header);
+ if (std::error_code Err =
+ addHeaderInclude(Header, Includes, LangOpts, Module->IsExternC))
return Err;
}
if (EC)
return EC;
}
-
+
// Recurse into submodules.
for (clang::Module::submodule_iterator Sub = Module->submodule_begin(),
SubEnd = Module->submodule_end();
@@ -285,7 +298,7 @@ bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
// Check whether we can build this module at all.
clang::Module::Requirement Requirement;
- clang::Module::HeaderDirective MissingHeader;
+ clang::Module::UnresolvedHeaderDirective MissingHeader;
if (!Module->isAvailable(CI.getLangOpts(), CI.getTarget(), Requirement,
MissingHeader)) {
if (MissingHeader.FileNameLoc.isValid()) {
@@ -301,10 +314,12 @@ bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
return false;
}
- if (!ModuleMapForUniquing)
+ if (ModuleMapForUniquing && ModuleMapForUniquing != ModuleMap) {
+ Module->IsInferred = true;
+ HS.getModuleMap().setInferredModuleAllowedBy(Module, ModuleMapForUniquing);
+ } else {
ModuleMapForUniquing = ModuleMap;
- Module->ModuleMap = ModuleMapForUniquing;
- assert(Module->ModuleMap && "missing module map file");
+ }
FileManager &FileMgr = CI.getFileManager();
@@ -312,6 +327,7 @@ bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
SmallString<256> HeaderContents;
std::error_code Err = std::error_code();
if (const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader())
+ // FIXME: Track the file name as written.
Err = addHeaderInclude(UmbrellaHeader, HeaderContents, CI.getLangOpts(),
Module->IsExternC);
if (!Err)
@@ -326,11 +342,15 @@ bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
return false;
}
- llvm::MemoryBuffer *InputBuffer =
+ // Inform the preprocessor that includes from within the input buffer should
+ // be resolved relative to the build directory of the module map file.
+ CI.getPreprocessor().setMainFileDir(Module->Directory);
+
+ std::unique_ptr<llvm::MemoryBuffer> InputBuffer =
llvm::MemoryBuffer::getMemBufferCopy(HeaderContents,
Module::getModuleInputBufferName());
// Ownership of InputBuffer will be transferred to the SourceManager.
- setCurrentInput(FrontendInputFile(InputBuffer, getCurrentFileKind(),
+ setCurrentInput(FrontendInputFile(InputBuffer.release(), getCurrentFileKind(),
Module->IsSystem));
return true;
}
@@ -363,19 +383,20 @@ bool GenerateModuleAction::ComputeASTConsumerArguments(CompilerInstance &CI,
return false;
}
-ASTConsumer *SyntaxOnlyAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
- return new ASTConsumer();
+std::unique_ptr<ASTConsumer>
+SyntaxOnlyAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return llvm::make_unique<ASTConsumer>();
}
-ASTConsumer *DumpModuleInfoAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
- return new ASTConsumer();
+std::unique_ptr<ASTConsumer>
+DumpModuleInfoAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return llvm::make_unique<ASTConsumer>();
}
-ASTConsumer *VerifyPCHAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
- return new ASTConsumer();
+std::unique_ptr<ASTConsumer>
+VerifyPCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return llvm::make_unique<ASTConsumer>();
}
void VerifyPCHAction::ExecuteAction() {
@@ -425,8 +446,8 @@ namespace {
Out.indent(2) << "Module map file: " << ModuleMapPath << "\n";
}
- bool ReadLanguageOptions(const LangOptions &LangOpts,
- bool Complain) override {
+ bool ReadLanguageOptions(const LangOptions &LangOpts, bool Complain,
+ bool AllowCompatibleDifferences) override {
Out.indent(2) << "Language options:\n";
#define LANGOPT(Name, Bits, Default, Description) \
DUMP_BOOLEAN(LangOpts.Name, Description);
@@ -528,9 +549,9 @@ void DumpModuleInfoAction::ExecuteAction() {
std::unique_ptr<llvm::raw_fd_ostream> OutFile;
StringRef OutputFileName = getCompilerInstance().getFrontendOpts().OutputFile;
if (!OutputFileName.empty() && OutputFileName != "-") {
- std::string ErrorInfo;
- OutFile.reset(new llvm::raw_fd_ostream(OutputFileName.str().c_str(),
- ErrorInfo, llvm::sys::fs::F_Text));
+ std::error_code EC;
+ OutFile.reset(new llvm::raw_fd_ostream(OutputFileName.str(), EC,
+ llvm::sys::fs::F_Text));
}
llvm::raw_ostream &Out = OutFile.get()? *OutFile.get() : llvm::outs();
@@ -675,13 +696,12 @@ void PrintPreambleAction::ExecuteAction() {
// We can't do anything with these.
return;
}
-
+
CompilerInstance &CI = getCompilerInstance();
- llvm::MemoryBuffer *Buffer
- = CI.getFileManager().getBufferForFile(getCurrentFile());
+ auto Buffer = CI.getFileManager().getBufferForFile(getCurrentFile());
if (Buffer) {
- unsigned Preamble = Lexer::ComputePreamble(Buffer, CI.getLangOpts()).first;
- llvm::outs().write(Buffer->getBufferStart(), Preamble);
- delete Buffer;
+ unsigned Preamble =
+ Lexer::ComputePreamble((*Buffer)->getBuffer(), CI.getLangOpts()).first;
+ llvm::outs().write((*Buffer)->getBufferStart(), Preamble);
}
}
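The PrintPreambleAction hunk reflects FileManager::getBufferForFile returning its MemoryBuffer wrapped in an ErrorOr-style result, so callers test it and dereference it instead of deleting a raw pointer. A small sketch of the calling convention; bufferSize is a hypothetical helper:

#include "clang/Basic/FileManager.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cstddef>

// Returns 0 when the file cannot be read, otherwise the buffer size.
static size_t bufferSize(clang::FileManager &FM, llvm::StringRef Path) {
  auto Buffer = FM.getBufferForFile(Path);
  if (!Buffer)
    return 0; // Buffer.getError() carries the std::error_code
  return (*Buffer)->getBufferSize();
}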
diff --git a/lib/Frontend/HeaderIncludeGen.cpp b/lib/Frontend/HeaderIncludeGen.cpp
index a2f5896746af..27011945712a 100644
--- a/lib/Frontend/HeaderIncludeGen.cpp
+++ b/lib/Frontend/HeaderIncludeGen.cpp
@@ -54,13 +54,12 @@ void clang::AttachHeaderIncludeGen(Preprocessor &PP, bool ShowAllHeaders,
// Open the output file, if used.
if (!OutputPath.empty()) {
- std::string Error;
+ std::error_code EC;
llvm::raw_fd_ostream *OS = new llvm::raw_fd_ostream(
- OutputPath.str().c_str(), Error,
- llvm::sys::fs::F_Append | llvm::sys::fs::F_Text);
- if (!Error.empty()) {
- PP.getDiagnostics().Report(
- clang::diag::warn_fe_cc_print_header_failure) << Error;
+ OutputPath.str(), EC, llvm::sys::fs::F_Append | llvm::sys::fs::F_Text);
+ if (EC) {
+ PP.getDiagnostics().Report(clang::diag::warn_fe_cc_print_header_failure)
+ << EC.message();
delete OS;
} else {
OS->SetUnbuffered();
@@ -70,9 +69,12 @@ void clang::AttachHeaderIncludeGen(Preprocessor &PP, bool ShowAllHeaders,
}
}
- PP.addPPCallbacks(new HeaderIncludesCallback(&PP, ShowAllHeaders,
- OutputFile, OwnsOutputFile,
- ShowDepth, MSStyle));
+ PP.addPPCallbacks(llvm::make_unique<HeaderIncludesCallback>(&PP,
+ ShowAllHeaders,
+ OutputFile,
+ OwnsOutputFile,
+ ShowDepth,
+ MSStyle));
}
void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
index d2edc9479f6b..a518a0a27a23 100644
--- a/lib/Frontend/InitHeaderSearch.cpp
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -380,7 +380,6 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp
break;
case llvm::Triple::aarch64:
- case llvm::Triple::arm64:
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
"arm64-apple-darwin10", "", "", triple);
break;
@@ -391,7 +390,7 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp
switch (os) {
case llvm::Triple::Linux:
llvm_unreachable("Include management is handled in the driver.");
-
+ break;
case llvm::Triple::Win32:
switch (triple.getEnvironment()) {
default: llvm_unreachable("Include management is handled in the driver.");
@@ -445,11 +444,6 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp
case llvm::Triple::Solaris:
AddGnuCPlusPlusIncludePaths("/usr/gcc/4.5/include/c++/4.5.2/",
"i386-pc-solaris2.11", "", "", triple);
- // Solaris - Fall though..
- case llvm::Triple::AuroraUX:
- // AuroraUX
- AddGnuCPlusPlusIncludePaths("/opt/gcc4/include/c++/4.2.4",
- "i386-pc-solaris2.11", "", "", triple);
break;
default:
break;
@@ -473,7 +467,7 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
case llvm::Triple::Win32:
if (triple.getEnvironment() == llvm::Triple::MSVC ||
triple.getEnvironment() == llvm::Triple::Itanium ||
- triple.getObjectFormat() == llvm::Triple::MachO)
+ triple.isOSBinFormatMachO())
return;
break;
}
@@ -535,16 +529,16 @@ static unsigned RemoveDuplicates(std::vector<DirectoryLookup> &SearchList,
if (CurEntry.isNormalDir()) {
// If this isn't the first time we've seen this dir, remove it.
- if (SeenDirs.insert(CurEntry.getDir()))
+ if (SeenDirs.insert(CurEntry.getDir()).second)
continue;
} else if (CurEntry.isFramework()) {
// If this isn't the first time we've seen this framework dir, remove it.
- if (SeenFrameworkDirs.insert(CurEntry.getFrameworkDir()))
+ if (SeenFrameworkDirs.insert(CurEntry.getFrameworkDir()).second)
continue;
} else {
assert(CurEntry.isHeaderMap() && "Not a headermap or normal dir?");
// If this isn't the first time we've seen this headermap, remove it.
- if (SeenHeaderMaps.insert(CurEntry.getHeaderMap()))
+ if (SeenHeaderMaps.insert(CurEntry.getHeaderMap()).second)
continue;
}
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index 7a9d09a5e7ac..f4241a94ae02 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -65,17 +65,14 @@ static void DefineBuiltinMacro(MacroBuilder &Builder, StringRef Macro,
/// AddImplicitInclude - Add an implicit \#include of the specified file to the
/// predefines buffer.
-static void AddImplicitInclude(MacroBuilder &Builder, StringRef File,
- FileManager &FileMgr) {
- Builder.append(Twine("#include \"") +
- HeaderSearch::NormalizeDashIncludePath(File, FileMgr) + "\"");
+/// As these includes are generated by -include arguments the header search
+/// logic is going to search relatively to the current working directory.
+static void AddImplicitInclude(MacroBuilder &Builder, StringRef File) {
+ Builder.append(Twine("#include \"") + File + "\"");
}
-static void AddImplicitIncludeMacros(MacroBuilder &Builder,
- StringRef File,
- FileManager &FileMgr) {
- Builder.append(Twine("#__include_macros \"") +
- HeaderSearch::NormalizeDashIncludePath(File, FileMgr) + "\"");
+static void AddImplicitIncludeMacros(MacroBuilder &Builder, StringRef File) {
+ Builder.append(Twine("#__include_macros \"") + File + "\"");
// Marker token to stop the __include_macros fetch loop.
Builder.append("##"); // ##?
}
@@ -94,7 +91,7 @@ static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP,
return;
}
- AddImplicitInclude(Builder, OriginalFile, PP.getFileManager());
+ AddImplicitInclude(Builder, OriginalFile);
}
/// \brief Add an implicit \#include using the original file used to generate
@@ -107,7 +104,7 @@ static void AddImplicitIncludePCH(MacroBuilder &Builder, Preprocessor &PP,
if (OriginalFile.empty())
return;
- AddImplicitInclude(Builder, OriginalFile, PP.getFileManager());
+ AddImplicitInclude(Builder, OriginalFile);
}
/// PickFP - This is used to pick a value based on the FP semantics of the
@@ -378,7 +375,7 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// C++1y [cpp.predefined]p1:
// The name __cplusplus is defined to the value 201402L when compiling a
// C++ translation unit.
- else if (LangOpts.CPlusPlus1y)
+ else if (LangOpts.CPlusPlus14)
Builder.defineMacro("__cplusplus", "201402L");
// C++11 [cpp.predefined]p1:
// The name __cplusplus is defined to the value 201103L when compiling a
@@ -412,6 +409,12 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
/// ISO/IEC JTC1/SC22/WG21 (C++) SD-6: "SG10 Feature Test Recommendations".
static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
MacroBuilder &Builder) {
+ // C++98 features.
+ if (LangOpts.RTTI)
+ Builder.defineMacro("__cpp_rtti", "199711");
+ if (LangOpts.CXXExceptions)
+ Builder.defineMacro("__cpp_exceptions", "199711");
+
// C++11 features.
if (LangOpts.CPlusPlus11) {
Builder.defineMacro("__cpp_unicode_characters", "200704");
@@ -420,17 +423,25 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_user_defined_literals", "200809");
Builder.defineMacro("__cpp_lambdas", "200907");
Builder.defineMacro("__cpp_constexpr",
- LangOpts.CPlusPlus1y ? "201304" : "200704");
+ LangOpts.CPlusPlus14 ? "201304" : "200704");
+ Builder.defineMacro("__cpp_range_based_for", "200907");
Builder.defineMacro("__cpp_static_assert", "200410");
Builder.defineMacro("__cpp_decltype", "200707");
Builder.defineMacro("__cpp_attributes", "200809");
Builder.defineMacro("__cpp_rvalue_references", "200610");
Builder.defineMacro("__cpp_variadic_templates", "200704");
+ Builder.defineMacro("__cpp_initializer_lists", "200806");
+ Builder.defineMacro("__cpp_delegating_constructors", "200604");
+ Builder.defineMacro("__cpp_nsdmi", "200809");
+ Builder.defineMacro("__cpp_inheriting_constructors", "200802");
+ Builder.defineMacro("__cpp_ref_qualifiers", "200710");
+ Builder.defineMacro("__cpp_alias_templates", "200704");
}
// C++14 features.
- if (LangOpts.CPlusPlus1y) {
+ if (LangOpts.CPlusPlus14) {
Builder.defineMacro("__cpp_binary_literals", "201304");
+ Builder.defineMacro("__cpp_digit_separators", "201309");
Builder.defineMacro("__cpp_init_captures", "201304");
Builder.defineMacro("__cpp_generic_lambdas", "201304");
Builder.defineMacro("__cpp_decltype_auto", "201304");
@@ -438,6 +449,8 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_aggregate_nsdmi", "201304");
Builder.defineMacro("__cpp_variable_templates", "201304");
}
+ if (LangOpts.SizedDeallocation)
+ Builder.defineMacro("__cpp_sized_deallocation", "201309");
}
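
Illustrative aside, not part of the patch: user code consumes these SD-6 feature-test macros through ordinary preprocessor checks. A small self-contained example, assuming a compiler (such as clang 3.6 in the relevant modes) that defines them:

    #include <cstdio>

    int main() {
    #if defined(__cpp_constexpr) && __cpp_constexpr >= 201304
      std::puts("relaxed (C++14) constexpr available");
    #endif
    #if defined(__cpp_rtti)
      std::puts("RTTI enabled: dynamic_cast and typeid are usable");
    #endif
    #if defined(__cpp_sized_deallocation)
      std::puts("sized operator delete available");
    #endif
      return 0;
    }
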
static void InitializePredefinedMacros(const TargetInfo &TI,
@@ -530,6 +543,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("IBOutletCollection(ClassName)",
"__attribute__((iboutletcollection(ClassName)))");
Builder.defineMacro("IBAction", "void)__attribute__((ibaction)");
+ Builder.defineMacro("IBInspectable", "");
+ Builder.defineMacro("IB_DESIGNABLE", "");
}
if (LangOpts.CPlusPlus)
@@ -551,7 +566,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__BLOCKS__");
}
- if (!LangOpts.MSVCCompat && LangOpts.CXXExceptions)
+ if (!LangOpts.MSVCCompat && LangOpts.Exceptions)
Builder.defineMacro("__EXCEPTIONS");
if (!LangOpts.MSVCCompat && LangOpts.RTTI)
Builder.defineMacro("__GXX_RTTI");
@@ -626,12 +641,10 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineTypeSize("__INTMAX_MAX__", TI.getIntMaxType(), TI, Builder);
DefineTypeSize("__SIZE_MAX__", TI.getSizeType(), TI, Builder);
- if (!LangOpts.MSVCCompat) {
- DefineTypeSize("__UINTMAX_MAX__", TI.getUIntMaxType(), TI, Builder);
- DefineTypeSize("__PTRDIFF_MAX__", TI.getPtrDiffType(0), TI, Builder);
- DefineTypeSize("__INTPTR_MAX__", TI.getIntPtrType(), TI, Builder);
- DefineTypeSize("__UINTPTR_MAX__", TI.getUIntPtrType(), TI, Builder);
- }
+ DefineTypeSize("__UINTMAX_MAX__", TI.getUIntMaxType(), TI, Builder);
+ DefineTypeSize("__PTRDIFF_MAX__", TI.getPtrDiffType(0), TI, Builder);
+ DefineTypeSize("__INTPTR_MAX__", TI.getIntPtrType(), TI, Builder);
+ DefineTypeSize("__UINTPTR_MAX__", TI.getUIntPtrType(), TI, Builder);
DefineTypeSizeof("__SIZEOF_DOUBLE__", TI.getDoubleWidth(), TI, Builder);
DefineTypeSizeof("__SIZEOF_FLOAT__", TI.getFloatWidth(), TI, Builder);
@@ -649,7 +662,12 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
TI.getTypeWidth(TI.getWCharType()), TI, Builder);
DefineTypeSizeof("__SIZEOF_WINT_T__",
TI.getTypeWidth(TI.getWIntType()), TI, Builder);
- if (TI.hasInt128Type())
+  // This is a temporary workaround while MIPS64 does not yet fully support
+  // 128-bit integers. The __int128 type must still be declared even though
+  // __SIZEOF_INT128__ is left undefined, because C++ standard headers such as
+  // <limits> emit an error if __int128 is not available.
+ if (TI.hasInt128Type() && !(TI.getTriple().getArch() == llvm::Triple::mips64el
+ || TI.getTriple().getArch() == llvm::Triple::mips64))
DefineTypeSizeof("__SIZEOF_INT128__", 128, TI, Builder);
DefineType("__INTMAX_TYPE__", TI.getIntMaxType(), Builder);
@@ -679,12 +697,10 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineType("__CHAR16_TYPE__", TI.getChar16Type(), Builder);
DefineType("__CHAR32_TYPE__", TI.getChar32Type(), Builder);
- if (!LangOpts.MSVCCompat) {
- DefineTypeWidth("__UINTMAX_WIDTH__", TI.getUIntMaxType(), TI, Builder);
- DefineType("__UINTPTR_TYPE__", TI.getUIntPtrType(), Builder);
- DefineFmt("__UINTPTR", TI.getUIntPtrType(), TI, Builder);
- DefineTypeWidth("__UINTPTR_WIDTH__", TI.getUIntPtrType(), TI, Builder);
- }
+ DefineTypeWidth("__UINTMAX_WIDTH__", TI.getUIntMaxType(), TI, Builder);
+ DefineType("__UINTPTR_TYPE__", TI.getUIntPtrType(), Builder);
+ DefineFmt("__UINTPTR", TI.getUIntPtrType(), TI, Builder);
+ DefineTypeWidth("__UINTPTR_WIDTH__", TI.getUIntPtrType(), TI, Builder);
DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat(), "F");
DefineFloatMacros(Builder, "DBL", &TI.getDoubleFormat(), "");
@@ -718,54 +734,52 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (TI.getLongLongWidth() > TI.getLongWidth())
DefineExactWidthIntType(TargetInfo::SignedLongLong, TI, Builder);
- if (!LangOpts.MSVCCompat) {
- DefineExactWidthIntType(TargetInfo::UnsignedChar, TI, Builder);
- DefineExactWidthIntTypeSize(TargetInfo::UnsignedChar, TI, Builder);
- DefineExactWidthIntTypeSize(TargetInfo::SignedChar, TI, Builder);
-
- if (TI.getShortWidth() > TI.getCharWidth()) {
- DefineExactWidthIntType(TargetInfo::UnsignedShort, TI, Builder);
- DefineExactWidthIntTypeSize(TargetInfo::UnsignedShort, TI, Builder);
- DefineExactWidthIntTypeSize(TargetInfo::SignedShort, TI, Builder);
- }
+ DefineExactWidthIntType(TargetInfo::UnsignedChar, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::UnsignedChar, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::SignedChar, TI, Builder);
- if (TI.getIntWidth() > TI.getShortWidth()) {
- DefineExactWidthIntType(TargetInfo::UnsignedInt, TI, Builder);
- DefineExactWidthIntTypeSize(TargetInfo::UnsignedInt, TI, Builder);
- DefineExactWidthIntTypeSize(TargetInfo::SignedInt, TI, Builder);
- }
+ if (TI.getShortWidth() > TI.getCharWidth()) {
+ DefineExactWidthIntType(TargetInfo::UnsignedShort, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::UnsignedShort, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::SignedShort, TI, Builder);
+ }
- if (TI.getLongWidth() > TI.getIntWidth()) {
- DefineExactWidthIntType(TargetInfo::UnsignedLong, TI, Builder);
- DefineExactWidthIntTypeSize(TargetInfo::UnsignedLong, TI, Builder);
- DefineExactWidthIntTypeSize(TargetInfo::SignedLong, TI, Builder);
- }
+ if (TI.getIntWidth() > TI.getShortWidth()) {
+ DefineExactWidthIntType(TargetInfo::UnsignedInt, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::UnsignedInt, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::SignedInt, TI, Builder);
+ }
- if (TI.getLongLongWidth() > TI.getLongWidth()) {
- DefineExactWidthIntType(TargetInfo::UnsignedLongLong, TI, Builder);
- DefineExactWidthIntTypeSize(TargetInfo::UnsignedLongLong, TI, Builder);
- DefineExactWidthIntTypeSize(TargetInfo::SignedLongLong, TI, Builder);
- }
+ if (TI.getLongWidth() > TI.getIntWidth()) {
+ DefineExactWidthIntType(TargetInfo::UnsignedLong, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::UnsignedLong, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::SignedLong, TI, Builder);
+ }
- DefineLeastWidthIntType(8, true, TI, Builder);
- DefineLeastWidthIntType(8, false, TI, Builder);
- DefineLeastWidthIntType(16, true, TI, Builder);
- DefineLeastWidthIntType(16, false, TI, Builder);
- DefineLeastWidthIntType(32, true, TI, Builder);
- DefineLeastWidthIntType(32, false, TI, Builder);
- DefineLeastWidthIntType(64, true, TI, Builder);
- DefineLeastWidthIntType(64, false, TI, Builder);
-
- DefineFastIntType(8, true, TI, Builder);
- DefineFastIntType(8, false, TI, Builder);
- DefineFastIntType(16, true, TI, Builder);
- DefineFastIntType(16, false, TI, Builder);
- DefineFastIntType(32, true, TI, Builder);
- DefineFastIntType(32, false, TI, Builder);
- DefineFastIntType(64, true, TI, Builder);
- DefineFastIntType(64, false, TI, Builder);
+ if (TI.getLongLongWidth() > TI.getLongWidth()) {
+ DefineExactWidthIntType(TargetInfo::UnsignedLongLong, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::UnsignedLongLong, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::SignedLongLong, TI, Builder);
}
+ DefineLeastWidthIntType(8, true, TI, Builder);
+ DefineLeastWidthIntType(8, false, TI, Builder);
+ DefineLeastWidthIntType(16, true, TI, Builder);
+ DefineLeastWidthIntType(16, false, TI, Builder);
+ DefineLeastWidthIntType(32, true, TI, Builder);
+ DefineLeastWidthIntType(32, false, TI, Builder);
+ DefineLeastWidthIntType(64, true, TI, Builder);
+ DefineLeastWidthIntType(64, false, TI, Builder);
+
+ DefineFastIntType(8, true, TI, Builder);
+ DefineFastIntType(8, false, TI, Builder);
+ DefineFastIntType(16, true, TI, Builder);
+ DefineFastIntType(16, false, TI, Builder);
+ DefineFastIntType(32, true, TI, Builder);
+ DefineFastIntType(32, false, TI, Builder);
+ DefineFastIntType(64, true, TI, Builder);
+ DefineFastIntType(64, false, TI, Builder);
+
if (const char *Prefix = TI.getUserLabelPrefix())
Builder.defineMacro("__USER_LABEL_PREFIX__", Prefix);
@@ -861,6 +875,13 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("_OPENMP", "201307");
}
+  // CUDA device path compilation
+ if (LangOpts.CUDAIsDevice) {
+ // The CUDA_ARCH value is set for the GPU target specified in the NVPTX
+ // backend's target defines.
+ Builder.defineMacro("__CUDA_ARCH__");
+ }
+
// Get other target #defines.
TI.getTargetDefines(LangOpts, Builder);
}
@@ -925,8 +946,7 @@ void clang::InitializePreprocessor(Preprocessor &PP,
// If -imacros are specified, include them now. These are processed before
// any -include directives.
for (unsigned i = 0, e = InitOpts.MacroIncludes.size(); i != e; ++i)
- AddImplicitIncludeMacros(Builder, InitOpts.MacroIncludes[i],
- PP.getFileManager());
+ AddImplicitIncludeMacros(Builder, InitOpts.MacroIncludes[i]);
// Process -include-pch/-include-pth directives.
if (!InitOpts.ImplicitPCHInclude.empty())
@@ -937,7 +957,7 @@ void clang::InitializePreprocessor(Preprocessor &PP,
// Process -include directives.
for (unsigned i = 0, e = InitOpts.Includes.size(); i != e; ++i) {
const std::string &Path = InitOpts.Includes[i];
- AddImplicitInclude(Builder, Path, PP.getFileManager());
+ AddImplicitInclude(Builder, Path);
}
// Exit the command line and go back to <built-in> (2 is LC_LEAVE).
diff --git a/lib/Frontend/LogDiagnosticPrinter.cpp b/lib/Frontend/LogDiagnosticPrinter.cpp
index 19539e0a5eb2..c6a18e0d80d2 100644
--- a/lib/Frontend/LogDiagnosticPrinter.cpp
+++ b/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -18,17 +18,11 @@
using namespace clang;
using namespace markup;
-LogDiagnosticPrinter::LogDiagnosticPrinter(raw_ostream &os,
- DiagnosticOptions *diags,
- bool _OwnsOutputStream)
- : OS(os), LangOpts(nullptr), DiagOpts(diags),
- OwnsOutputStream(_OwnsOutputStream) {
-}
-
-LogDiagnosticPrinter::~LogDiagnosticPrinter() {
- if (OwnsOutputStream)
- delete &OS;
-}
+LogDiagnosticPrinter::LogDiagnosticPrinter(
+ raw_ostream &os, DiagnosticOptions *diags,
+ std::unique_ptr<raw_ostream> StreamOwner)
+ : OS(os), StreamOwner(std::move(StreamOwner)), LangOpts(nullptr),
+ DiagOpts(diags) {}
static StringRef getLevelName(DiagnosticsEngine::Level Level) {
switch (Level) {
@@ -69,6 +63,14 @@ LogDiagnosticPrinter::EmitDiagEntry(llvm::raw_ostream &OS,
<< " ";
EmitString(OS, DE.Message) << '\n';
}
+ OS << " <key>ID</key>\n"
+ << " ";
+ EmitInteger(OS, DE.DiagnosticID) << '\n';
+ if (!DE.WarningOption.empty()) {
+ OS << " <key>WarningOption</key>\n"
+ << " ";
+ EmitString(OS, DE.WarningOption) << '\n';
+ }
OS << " </dict>\n";
}
@@ -128,6 +130,8 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
DE.DiagnosticID = Info.getID();
DE.DiagnosticLevel = Level;
+ DE.WarningOption = DiagnosticIDs::getWarningOptionForDiag(DE.DiagnosticID);
+
// Format the message.
SmallString<100> MessageStr;
Info.FormatDiagnostic(MessageStr);
diff --git a/lib/Frontend/ModuleDependencyCollector.cpp b/lib/Frontend/ModuleDependencyCollector.cpp
index d30f9214f82e..62865e984f65 100644
--- a/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/lib/Frontend/ModuleDependencyCollector.cpp
@@ -13,8 +13,8 @@
#include "clang/Frontend/Utils.h"
#include "clang/Serialization/ASTReader.h"
-#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
@@ -38,7 +38,7 @@ public:
}
void ModuleDependencyCollector::attachToASTReader(ASTReader &R) {
- R.addListener(new ModuleDependencyListener(*this));
+ R.addListener(llvm::make_unique<ModuleDependencyListener>(*this));
}
void ModuleDependencyCollector::writeFileMap() {
@@ -48,49 +48,26 @@ void ModuleDependencyCollector::writeFileMap() {
SmallString<256> Dest = getDest();
llvm::sys::path::append(Dest, "vfs.yaml");
- std::string ErrorInfo;
- llvm::raw_fd_ostream OS(Dest.c_str(), ErrorInfo, llvm::sys::fs::F_Text);
- if (!ErrorInfo.empty()) {
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(Dest, EC, llvm::sys::fs::F_Text);
+ if (EC) {
setHasErrors();
return;
}
VFSWriter.write(OS);
}
-/// Remove traversal (ie, . or ..) from the given absolute path.
-static void removePathTraversal(SmallVectorImpl<char> &Path) {
- using namespace llvm::sys;
- SmallVector<StringRef, 16> ComponentStack;
- StringRef P(Path.data(), Path.size());
-
- // Skip the root path, then look for traversal in the components.
- StringRef Rel = path::relative_path(P);
- for (StringRef C : llvm::make_range(path::begin(Rel), path::end(Rel))) {
- if (C == ".")
- continue;
- if (C == "..") {
- assert(ComponentStack.size() && "Path traverses out of parent");
- ComponentStack.pop_back();
- } else
- ComponentStack.push_back(C);
- }
-
- // The stack is now the path without any directory traversal.
- SmallString<256> Buffer = path::root_path(P);
- for (StringRef C : ComponentStack)
- path::append(Buffer, C);
-
- // Put the result in Path.
- Path.swap(Buffer);
-}
-
std::error_code ModuleDependencyListener::copyToRoot(StringRef Src) {
using namespace llvm::sys;
// We need an absolute path to append to the root.
SmallString<256> AbsoluteSrc = Src;
fs::make_absolute(AbsoluteSrc);
- removePathTraversal(AbsoluteSrc);
+ // Canonicalize to a native path to avoid mixed separator styles.
+ path::native(AbsoluteSrc);
+ // TODO: We probably need to handle .. as well as . in order to have valid
+ // input to the YAMLVFSWriter.
+ FileManager::removeDotPaths(AbsoluteSrc);
// Build the destination path.
SmallString<256> Dest = Collector.getDest();
diff --git a/lib/Frontend/MultiplexConsumer.cpp b/lib/Frontend/MultiplexConsumer.cpp
index 0e933a3f165f..019882833d41 100644
--- a/lib/Frontend/MultiplexConsumer.cpp
+++ b/lib/Frontend/MultiplexConsumer.cpp
@@ -107,6 +107,7 @@ public:
const ObjCPropertyDecl *OrigProp,
const ObjCCategoryDecl *ClassExt) override;
void DeclarationMarkedUsed(const Decl *D) override;
+ void DeclarationMarkedOpenMPThreadPrivate(const Decl *D) override;
private:
std::vector<ASTMutationListener*> Listeners;
@@ -180,121 +181,121 @@ void MultiplexASTMutationListener::DeclarationMarkedUsed(const Decl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->DeclarationMarkedUsed(D);
}
+void MultiplexASTMutationListener::DeclarationMarkedOpenMPThreadPrivate(
+ const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->DeclarationMarkedOpenMPThreadPrivate(D);
+}
} // end namespace clang
-MultiplexConsumer::MultiplexConsumer(ArrayRef<ASTConsumer *> C)
- : Consumers(C.begin(), C.end()), MutationListener(),
- DeserializationListener() {
+MultiplexConsumer::MultiplexConsumer(
+ std::vector<std::unique_ptr<ASTConsumer>> C)
+ : Consumers(std::move(C)), MutationListener(), DeserializationListener() {
// Collect the mutation listeners and deserialization listeners of all
// children, and create a multiplex listener each if so.
std::vector<ASTMutationListener*> mutationListeners;
std::vector<ASTDeserializationListener*> serializationListeners;
- for (size_t i = 0, e = Consumers.size(); i != e; ++i) {
- ASTMutationListener* mutationListener =
- Consumers[i]->GetASTMutationListener();
- if (mutationListener)
+ for (auto &Consumer : Consumers) {
+ if (auto *mutationListener = Consumer->GetASTMutationListener())
mutationListeners.push_back(mutationListener);
- ASTDeserializationListener* serializationListener =
- Consumers[i]->GetASTDeserializationListener();
- if (serializationListener)
+ if (auto *serializationListener = Consumer->GetASTDeserializationListener())
serializationListeners.push_back(serializationListener);
}
- if (mutationListeners.size()) {
- MutationListener.reset(new MultiplexASTMutationListener(mutationListeners));
+ if (!mutationListeners.empty()) {
+ MutationListener =
+ llvm::make_unique<MultiplexASTMutationListener>(mutationListeners);
}
- if (serializationListeners.size()) {
- DeserializationListener.reset(
- new MultiplexASTDeserializationListener(serializationListeners));
+ if (!serializationListeners.empty()) {
+ DeserializationListener =
+ llvm::make_unique<MultiplexASTDeserializationListener>(
+ serializationListeners);
}
}
-MultiplexConsumer::~MultiplexConsumer() {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- delete Consumers[i];
-}
+MultiplexConsumer::~MultiplexConsumer() {}
void MultiplexConsumer::Initialize(ASTContext &Context) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->Initialize(Context);
+ for (auto &Consumer : Consumers)
+ Consumer->Initialize(Context);
}
bool MultiplexConsumer::HandleTopLevelDecl(DeclGroupRef D) {
bool Continue = true;
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Continue = Continue && Consumers[i]->HandleTopLevelDecl(D);
+ for (auto &Consumer : Consumers)
+ Continue = Continue && Consumer->HandleTopLevelDecl(D);
return Continue;
}
void MultiplexConsumer::HandleInlineMethodDefinition(CXXMethodDecl *D) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleInlineMethodDefinition(D);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleInlineMethodDefinition(D);
}
-void MultiplexConsumer::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleCXXStaticMemberVarInstantiation(VD);
+void MultiplexConsumer::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleCXXStaticMemberVarInstantiation(VD);
}
void MultiplexConsumer::HandleInterestingDecl(DeclGroupRef D) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleInterestingDecl(D);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleInterestingDecl(D);
}
void MultiplexConsumer::HandleTranslationUnit(ASTContext &Ctx) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleTranslationUnit(Ctx);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleTranslationUnit(Ctx);
}
void MultiplexConsumer::HandleTagDeclDefinition(TagDecl *D) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleTagDeclDefinition(D);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleTagDeclDefinition(D);
}
void MultiplexConsumer::HandleTagDeclRequiredDefinition(const TagDecl *D) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleTagDeclRequiredDefinition(D);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleTagDeclRequiredDefinition(D);
}
void MultiplexConsumer::HandleCXXImplicitFunctionInstantiation(FunctionDecl *D){
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleCXXImplicitFunctionInstantiation(D);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleCXXImplicitFunctionInstantiation(D);
}
void MultiplexConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleTopLevelDeclInObjCContainer(D);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleTopLevelDeclInObjCContainer(D);
}
void MultiplexConsumer::HandleImplicitImportDecl(ImportDecl *D) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleImplicitImportDecl(D);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleImplicitImportDecl(D);
}
void MultiplexConsumer::HandleLinkerOptionPragma(llvm::StringRef Opts) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleLinkerOptionPragma(Opts);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleLinkerOptionPragma(Opts);
}
void MultiplexConsumer::HandleDetectMismatch(llvm::StringRef Name, llvm::StringRef Value) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleDetectMismatch(Name, Value);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleDetectMismatch(Name, Value);
}
void MultiplexConsumer::HandleDependentLibrary(llvm::StringRef Lib) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleDependentLibrary(Lib);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleDependentLibrary(Lib);
}
void MultiplexConsumer::CompleteTentativeDefinition(VarDecl *D) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->CompleteTentativeDefinition(D);
+ for (auto &Consumer : Consumers)
+ Consumer->CompleteTentativeDefinition(D);
}
void MultiplexConsumer::HandleVTable(
CXXRecordDecl *RD, bool DefinitionRequired) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleVTable(RD, DefinitionRequired);
+ for (auto &Consumer : Consumers)
+ Consumer->HandleVTable(RD, DefinitionRequired);
}
ASTMutationListener *MultiplexConsumer::GetASTMutationListener() {
@@ -306,18 +307,18 @@ ASTDeserializationListener *MultiplexConsumer::GetASTDeserializationListener() {
}
void MultiplexConsumer::PrintStats() {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->PrintStats();
+ for (auto &Consumer : Consumers)
+ Consumer->PrintStats();
}
void MultiplexConsumer::InitializeSema(Sema &S) {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumers[i]))
+ for (auto &Consumer : Consumers)
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumer.get()))
SC->InitializeSema(S);
}
void MultiplexConsumer::ForgetSema() {
- for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumers[i]))
+ for (auto &Consumer : Consumers)
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumer.get()))
SC->ForgetSema();
}
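
Illustrative sketch, not part of the patch: the MultiplexConsumer hunks above change the ownership contract, so callers now pass a vector of unique_ptrs that the multiplexer owns until it is destroyed (the explicit delete loop in the old destructor goes away). A hypothetical usage sketch; the include paths are assumptions based on the files this diff touches:

    #include "clang/AST/ASTConsumer.h"
    #include "clang/Frontend/MultiplexConsumer.h"
    #include "llvm/ADT/STLExtras.h"
    #include <memory>
    #include <utility>
    #include <vector>

    // Build a multiplexer over two (here: no-op) consumers. The vector is
    // moved in, and the MultiplexConsumer deletes the children when destroyed.
    std::unique_ptr<clang::ASTConsumer> makeMultiplex() {
      std::vector<std::unique_ptr<clang::ASTConsumer>> Consumers;
      Consumers.push_back(llvm::make_unique<clang::ASTConsumer>());
      Consumers.push_back(llvm::make_unique<clang::ASTConsumer>());
      return llvm::make_unique<clang::MultiplexConsumer>(std::move(Consumers));
    }
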
diff --git a/lib/Frontend/PrintPreprocessedOutput.cpp b/lib/Frontend/PrintPreprocessedOutput.cpp
index 4a6f8dbef928..7c1d9a568831 100644
--- a/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -332,7 +332,10 @@ void PrintPPOutputPPCallbacks::InclusionDirective(SourceLocation HashLoc,
MoveToLine(HashLoc);
OS << "@import " << Imported->getFullModuleName() << ";"
<< " /* clang -E: implicit import for \"" << File->getName() << "\" */";
+ // Since we want a newline after the @import, but not a #<line>, start a new
+ // line immediately.
EmittedTokensOnThisLine = true;
+ startNewLineIfNeeded();
}
}
@@ -724,7 +727,7 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
PP.AddPragmaHandler("clang",
new UnknownPragmaHandler("#pragma clang", Callbacks));
- PP.addPPCallbacks(Callbacks);
+ PP.addPPCallbacks(std::unique_ptr<PPCallbacks>(Callbacks));
// After we have configured the preprocessor, enter the main file.
PP.EnterMainSourceFile();
diff --git a/lib/Frontend/Rewrite/FixItRewriter.cpp b/lib/Frontend/Rewrite/FixItRewriter.cpp
index 8b7af7166c8f..a3e14f9f020e 100644
--- a/lib/Frontend/Rewrite/FixItRewriter.cpp
+++ b/lib/Frontend/Rewrite/FixItRewriter.cpp
@@ -36,14 +36,13 @@ FixItRewriter::FixItRewriter(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
FixItOpts(FixItOpts),
NumFailures(0),
PrevDiagSilenced(false) {
- OwnsClient = Diags.ownsClient();
- Client = Diags.takeClient();
- Diags.setClient(this);
+ Owner = Diags.takeClient();
+ Client = Diags.getClient();
+ Diags.setClient(this, false);
}
FixItRewriter::~FixItRewriter() {
- Diags.takeClient();
- Diags.setClient(Client, OwnsClient);
+ Diags.setClient(Client, Owner.release() != nullptr);
}
bool FixItRewriter::WriteFixedFile(FileID ID, raw_ostream &OS) {
@@ -86,17 +85,16 @@ bool FixItRewriter::WriteFixedFiles(
const FileEntry *Entry = Rewrite.getSourceMgr().getFileEntryForID(I->first);
int fd;
std::string Filename = FixItOpts->RewriteFilename(Entry->getName(), fd);
- std::string Err;
+ std::error_code EC;
std::unique_ptr<llvm::raw_fd_ostream> OS;
if (fd != -1) {
OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true));
} else {
- OS.reset(new llvm::raw_fd_ostream(Filename.c_str(), Err,
- llvm::sys::fs::F_None));
+ OS.reset(new llvm::raw_fd_ostream(Filename, EC, llvm::sys::fs::F_None));
}
- if (!Err.empty()) {
- Diags.Report(clang::diag::err_fe_unable_to_open_output)
- << Filename << Err;
+ if (EC) {
+ Diags.Report(clang::diag::err_fe_unable_to_open_output) << Filename
+ << EC.message();
continue;
}
RewriteBuffer &RewriteBuf = I->second;
@@ -189,12 +187,10 @@ void FixItRewriter::Diag(SourceLocation Loc, unsigned DiagID) {
// When producing this diagnostic, we temporarily bypass ourselves,
// clear out any current diagnostic, and let the downstream client
// format the diagnostic.
- Diags.takeClient();
- Diags.setClient(Client);
+ Diags.setClient(Client, false);
Diags.Clear();
Diags.Report(Loc, DiagID);
- Diags.takeClient();
- Diags.setClient(this);
+ Diags.setClient(this, false);
}
FixItOptions::~FixItOptions() {}
diff --git a/lib/Frontend/Rewrite/FrontendActions.cpp b/lib/Frontend/Rewrite/FrontendActions.cpp
index 59fef736f16a..1b5eb2855bea 100644
--- a/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -30,8 +30,8 @@ using namespace clang;
// AST Consumer Actions
//===----------------------------------------------------------------------===//
-ASTConsumer *HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile))
return CreateHTMLPrinter(OS, CI.getPreprocessor());
return nullptr;
@@ -40,9 +40,9 @@ ASTConsumer *HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI,
FixItAction::FixItAction() {}
FixItAction::~FixItAction() {}
-ASTConsumer *FixItAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
- return new ASTConsumer();
+std::unique_ptr<ASTConsumer>
+FixItAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return llvm::make_unique<ASTConsumer>();
}
namespace {
@@ -148,8 +148,8 @@ bool FixItRecompile::BeginInvocation(CompilerInstance &CI) {
#ifdef CLANG_ENABLE_OBJC_REWRITER
-ASTConsumer *RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::unique_ptr<ASTConsumer>
+RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp")) {
if (CI.getLangOpts().ObjCRuntime.isNonFragile())
return CreateModernObjCRewriter(InFile, OS,
diff --git a/lib/Frontend/Rewrite/HTMLPrint.cpp b/lib/Frontend/Rewrite/HTMLPrint.cpp
index 64da05fdde61..22ccfe6936b7 100644
--- a/lib/Frontend/Rewrite/HTMLPrint.cpp
+++ b/lib/Frontend/Rewrite/HTMLPrint.cpp
@@ -47,11 +47,12 @@ namespace {
};
}
-ASTConsumer* clang::CreateHTMLPrinter(raw_ostream *OS,
- Preprocessor &PP,
- bool SyntaxHighlight,
- bool HighlightMacros) {
- return new HTMLPrinter(OS, PP, SyntaxHighlight, HighlightMacros);
+std::unique_ptr<ASTConsumer> clang::CreateHTMLPrinter(raw_ostream *OS,
+ Preprocessor &PP,
+ bool SyntaxHighlight,
+ bool HighlightMacros) {
+ return llvm::make_unique<HTMLPrinter>(OS, PP, SyntaxHighlight,
+ HighlightMacros);
}
void HTMLPrinter::Initialize(ASTContext &context) {
diff --git a/lib/Frontend/Rewrite/InclusionRewriter.cpp b/lib/Frontend/Rewrite/InclusionRewriter.cpp
index aa7017baee26..140055735a99 100644
--- a/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -40,6 +40,7 @@ class InclusionRewriter : public PPCallbacks {
Preprocessor &PP; ///< Used to find inclusion directives.
SourceManager &SM; ///< Used to read and manage source files.
raw_ostream &OS; ///< The destination stream for rewritten contents.
+ StringRef MainEOL; ///< The line ending marker to use.
const llvm::MemoryBuffer *PredefinesBuffer; ///< The preprocessor predefines.
bool ShowLineMarkers; ///< Show #line markers.
bool UseLineDirective; ///< Use of line directives or line markers.
@@ -54,6 +55,7 @@ public:
void setPredefinesBuffer(const llvm::MemoryBuffer *Buf) {
PredefinesBuffer = Buf;
}
+ void detectMainFileEOL();
private:
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
@@ -67,8 +69,8 @@ private:
const Module *Imported) override;
void WriteLineInfo(const char *Filename, int Line,
SrcMgr::CharacteristicKind FileType,
- StringRef EOL, StringRef Extra = StringRef());
- void WriteImplicitModuleImport(const Module *Mod, StringRef EOL);
+ StringRef Extra = StringRef());
+ void WriteImplicitModuleImport(const Module *Mod);
void OutputContentUpTo(const MemoryBuffer &FromFile,
unsigned &WriteFrom, unsigned WriteTo,
StringRef EOL, int &lines,
@@ -88,9 +90,9 @@ private:
/// Initializes an InclusionRewriter with a \p PP source and \p OS destination.
InclusionRewriter::InclusionRewriter(Preprocessor &PP, raw_ostream &OS,
bool ShowLineMarkers)
- : PP(PP), SM(PP.getSourceManager()), OS(OS), PredefinesBuffer(nullptr),
- ShowLineMarkers(ShowLineMarkers),
- LastInsertedFileChange(FileChanges.end()) {
+ : PP(PP), SM(PP.getSourceManager()), OS(OS), MainEOL("\n"),
+ PredefinesBuffer(nullptr), ShowLineMarkers(ShowLineMarkers),
+ LastInsertedFileChange(FileChanges.end()) {
// If we're in microsoft mode, use normal #line instead of line markers.
UseLineDirective = PP.getLangOpts().MicrosoftExt;
}
@@ -101,7 +103,7 @@ InclusionRewriter::InclusionRewriter(Preprocessor &PP, raw_ostream &OS,
/// any \p Extra context specifiers in GNU line directives.
void InclusionRewriter::WriteLineInfo(const char *Filename, int Line,
SrcMgr::CharacteristicKind FileType,
- StringRef EOL, StringRef Extra) {
+ StringRef Extra) {
if (!ShowLineMarkers)
return;
if (UseLineDirective) {
@@ -125,13 +127,12 @@ void InclusionRewriter::WriteLineInfo(const char *Filename, int Line,
// should be treated as being wrapped in an implicit extern "C" block."
OS << " 3 4";
}
- OS << EOL;
+ OS << MainEOL;
}
-void InclusionRewriter::WriteImplicitModuleImport(const Module *Mod,
- StringRef EOL) {
+void InclusionRewriter::WriteImplicitModuleImport(const Module *Mod) {
OS << "@import " << Mod->getFullModuleName() << ";"
- << " /* clang -frewrite-includes: implicit import */" << EOL;
+ << " /* clang -frewrite-includes: implicit import */" << MainEOL;
}
/// FileChanged - Whenever the preprocessor enters or exits a #include file
@@ -197,23 +198,33 @@ InclusionRewriter::FindFileChangeLocation(SourceLocation Loc) const {
/// Detect the likely line ending style of \p FromFile by examining the first
/// newline found within it.
static StringRef DetectEOL(const MemoryBuffer &FromFile) {
- // detect what line endings the file uses, so that added content does not mix
- // the style
+ // Detect what line endings the file uses, so that added content does not mix
+ // the style. We need to check for "\r\n" first because "\n\r" will match
+ // "\r\n\r\n".
const char *Pos = strchr(FromFile.getBufferStart(), '\n');
if (!Pos)
return "\n";
- if (Pos + 1 < FromFile.getBufferEnd() && Pos[1] == '\r')
- return "\n\r";
if (Pos - 1 >= FromFile.getBufferStart() && Pos[-1] == '\r')
return "\r\n";
+ if (Pos + 1 < FromFile.getBufferEnd() && Pos[1] == '\r')
+ return "\n\r";
return "\n";
}
+void InclusionRewriter::detectMainFileEOL() {
+ bool Invalid;
+ const MemoryBuffer &FromFile = *SM.getBuffer(SM.getMainFileID(), &Invalid);
+ assert(!Invalid);
+ if (Invalid)
+ return; // Should never happen, but whatever.
+ MainEOL = DetectEOL(FromFile);
+}
+
/// Writes out bytes from \p FromFile, starting at \p NextToWrite and ending at
/// \p WriteTo - 1.
void InclusionRewriter::OutputContentUpTo(const MemoryBuffer &FromFile,
unsigned &WriteFrom, unsigned WriteTo,
- StringRef EOL, int &Line,
+ StringRef LocalEOL, int &Line,
bool EnsureNewline) {
if (WriteTo <= WriteFrom)
return;
@@ -222,14 +233,37 @@ void InclusionRewriter::OutputContentUpTo(const MemoryBuffer &FromFile,
WriteFrom = WriteTo;
return;
}
- OS.write(FromFile.getBufferStart() + WriteFrom, WriteTo - WriteFrom);
- // count lines manually, it's faster than getPresumedLoc()
- Line += std::count(FromFile.getBufferStart() + WriteFrom,
- FromFile.getBufferStart() + WriteTo, '\n');
- if (EnsureNewline) {
- char LastChar = FromFile.getBufferStart()[WriteTo - 1];
- if (LastChar != '\n' && LastChar != '\r')
- OS << EOL;
+
+ // If we would output half of a line ending, advance one character to output
+ // the whole line ending. All buffers are null terminated, so looking ahead
+ // one byte is safe.
+ if (LocalEOL.size() == 2 &&
+ LocalEOL[0] == (FromFile.getBufferStart() + WriteTo)[-1] &&
+ LocalEOL[1] == (FromFile.getBufferStart() + WriteTo)[0])
+ WriteTo++;
+
+ StringRef TextToWrite(FromFile.getBufferStart() + WriteFrom,
+ WriteTo - WriteFrom);
+
+ if (MainEOL == LocalEOL) {
+ OS << TextToWrite;
+ // count lines manually, it's faster than getPresumedLoc()
+ Line += TextToWrite.count(LocalEOL);
+ if (EnsureNewline && !TextToWrite.endswith(LocalEOL))
+ OS << MainEOL;
+ } else {
+ // Output the file one line at a time, rewriting the line endings as we go.
+ StringRef Rest = TextToWrite;
+ while (!Rest.empty()) {
+ StringRef LineText;
+ std::tie(LineText, Rest) = Rest.split(LocalEOL);
+ OS << LineText;
+ Line++;
+ if (!Rest.empty())
+ OS << MainEOL;
+ }
+ if (TextToWrite.endswith(LocalEOL) || EnsureNewline)
+ OS << MainEOL;
}
WriteFrom = WriteTo;
}
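
Illustrative aside, not part of the patch: the else-branch added above converts an included file's line endings to the main file's style by splitting on the local EOL and re-joining with the main EOL. A standalone sketch of the same transformation on std::string (the helper name is illustrative):

    #include <string>

    // Replace every occurrence of LocalEOL ("\r\n", "\n\r", or "\n") with MainEOL.
    static std::string normalizeEOL(const std::string &Text,
                                    const std::string &LocalEOL,
                                    const std::string &MainEOL) {
      std::string Out;
      std::string::size_type Pos = 0;
      while (true) {
        std::string::size_type Hit = Text.find(LocalEOL, Pos);
        if (Hit == std::string::npos) {
          Out.append(Text, Pos, std::string::npos);
          break;
        }
        Out.append(Text, Pos, Hit - Pos);
        Out += MainEOL;
        Pos = Hit + LocalEOL.size();
      }
      return Out;
    }
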
@@ -242,10 +276,11 @@ void InclusionRewriter::OutputContentUpTo(const MemoryBuffer &FromFile,
void InclusionRewriter::CommentOutDirective(Lexer &DirectiveLex,
const Token &StartToken,
const MemoryBuffer &FromFile,
- StringRef EOL,
+ StringRef LocalEOL,
unsigned &NextToWrite, int &Line) {
OutputContentUpTo(FromFile, NextToWrite,
- SM.getFileOffset(StartToken.getLocation()), EOL, Line, false);
+ SM.getFileOffset(StartToken.getLocation()), LocalEOL, Line,
+ false);
Token DirectiveToken;
do {
DirectiveLex.LexFromRawLexer(DirectiveToken);
@@ -254,11 +289,12 @@ void InclusionRewriter::CommentOutDirective(Lexer &DirectiveLex,
// OutputContentUpTo() would not output anything anyway.
return;
}
- OS << "#if 0 /* expanded by -frewrite-includes */" << EOL;
+ OS << "#if 0 /* expanded by -frewrite-includes */" << MainEOL;
OutputContentUpTo(FromFile, NextToWrite,
- SM.getFileOffset(DirectiveToken.getLocation()) + DirectiveToken.getLength(),
- EOL, Line, true);
- OS << "#endif /* expanded by -frewrite-includes */" << EOL;
+ SM.getFileOffset(DirectiveToken.getLocation()) +
+ DirectiveToken.getLength(),
+ LocalEOL, Line, true);
+ OS << "#endif /* expanded by -frewrite-includes */" << MainEOL;
}
/// Find the next identifier in the pragma directive specified by \p RawToken.
@@ -333,10 +369,13 @@ bool InclusionRewriter::HandleHasInclude(
// FIXME: Subframeworks aren't handled here. Do we care?
bool isAngled = PP.GetIncludeFilenameSpelling(Tok.getLocation(), Filename);
const DirectoryLookup *CurDir;
+ const FileEntry *FileEnt = PP.getSourceManager().getFileEntryForID(FileId);
+ SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 1>
+ Includers;
+ Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
const FileEntry *File = PP.getHeaderSearchInfo().LookupFile(
- Filename, SourceLocation(), isAngled, nullptr, CurDir,
- PP.getSourceManager().getFileEntryForID(FileId), nullptr, nullptr,
- nullptr, false);
+ Filename, SourceLocation(), isAngled, nullptr, CurDir, Includers, nullptr,
+ nullptr, nullptr, false);
FileExists = File != nullptr;
return true;
@@ -355,13 +394,13 @@ bool InclusionRewriter::Process(FileID FileId,
Lexer RawLex(FileId, &FromFile, PP.getSourceManager(), PP.getLangOpts());
RawLex.SetCommentRetentionState(false);
- StringRef EOL = DetectEOL(FromFile);
+ StringRef LocalEOL = DetectEOL(FromFile);
// Per the GNU docs: "1" indicates entering a new file.
if (FileId == SM.getMainFileID() || FileId == PP.getPredefinesFileID())
- WriteLineInfo(FileName, 1, FileType, EOL, "");
+ WriteLineInfo(FileName, 1, FileType, "");
else
- WriteLineInfo(FileName, 1, FileType, EOL, " 1");
+ WriteLineInfo(FileName, 1, FileType, " 1");
if (SM.getFileIDSize(FileId) == 0)
return false;
@@ -389,15 +428,15 @@ bool InclusionRewriter::Process(FileID FileId,
case tok::pp_include:
case tok::pp_include_next:
case tok::pp_import: {
- CommentOutDirective(RawLex, HashToken, FromFile, EOL, NextToWrite,
+ CommentOutDirective(RawLex, HashToken, FromFile, LocalEOL, NextToWrite,
Line);
if (FileId != PP.getPredefinesFileID())
- WriteLineInfo(FileName, Line - 1, FileType, EOL, "");
+ WriteLineInfo(FileName, Line - 1, FileType, "");
StringRef LineInfoExtra;
if (const FileChange *Change = FindFileChangeLocation(
HashToken.getLocation())) {
if (Change->Mod) {
- WriteImplicitModuleImport(Change->Mod, EOL);
+ WriteImplicitModuleImport(Change->Mod);
// else now include and recursively process the file
} else if (Process(Change->Id, Change->FileType)) {
@@ -410,7 +449,7 @@ bool InclusionRewriter::Process(FileID FileId,
}
// fix up lineinfo (since commented out directive changed line
// numbers) for inclusions that were skipped due to header guards
- WriteLineInfo(FileName, Line, FileType, EOL, LineInfoExtra);
+ WriteLineInfo(FileName, Line, FileType, LineInfoExtra);
break;
}
case tok::pp_pragma: {
@@ -418,17 +457,17 @@ bool InclusionRewriter::Process(FileID FileId,
if (Identifier == "clang" || Identifier == "GCC") {
if (NextIdentifierName(RawLex, RawToken) == "system_header") {
// keep the directive in, commented out
- CommentOutDirective(RawLex, HashToken, FromFile, EOL,
+ CommentOutDirective(RawLex, HashToken, FromFile, LocalEOL,
NextToWrite, Line);
// update our own type
FileType = SM.getFileCharacteristic(RawToken.getLocation());
- WriteLineInfo(FileName, Line, FileType, EOL);
+ WriteLineInfo(FileName, Line, FileType);
}
} else if (Identifier == "once") {
// keep the directive in, commented out
- CommentOutDirective(RawLex, HashToken, FromFile, EOL,
+ CommentOutDirective(RawLex, HashToken, FromFile, LocalEOL,
NextToWrite, Line);
- WriteLineInfo(FileName, Line, FileType, EOL);
+ WriteLineInfo(FileName, Line, FileType);
}
break;
}
@@ -468,12 +507,12 @@ bool InclusionRewriter::Process(FileID FileId,
// Replace the macro with (0) or (1), followed by the commented
// out macro for reference.
OutputContentUpTo(FromFile, NextToWrite, SM.getFileOffset(Loc),
- EOL, Line, false);
+ LocalEOL, Line, false);
OS << '(' << (int) HasFile << ")/*";
OutputContentUpTo(FromFile, NextToWrite,
SM.getFileOffset(RawToken.getLocation()) +
- RawToken.getLength(),
- EOL, Line, false);
+ RawToken.getLength(),
+ LocalEOL, Line, false);
OS << "*/";
}
} while (RawToken.isNot(tok::eod));
@@ -481,8 +520,8 @@ bool InclusionRewriter::Process(FileID FileId,
OutputContentUpTo(FromFile, NextToWrite,
SM.getFileOffset(RawToken.getLocation()) +
RawToken.getLength(),
- EOL, Line, /*EnsureNewLine*/ true);
- WriteLineInfo(FileName, Line, FileType, EOL);
+ LocalEOL, Line, /*EnsureNewline=*/ true);
+ WriteLineInfo(FileName, Line, FileType);
}
break;
}
@@ -497,11 +536,11 @@ bool InclusionRewriter::Process(FileID FileId,
do {
RawLex.LexFromRawLexer(RawToken);
} while (RawToken.isNot(tok::eod) && RawToken.isNot(tok::eof));
- OutputContentUpTo(
- FromFile, NextToWrite,
- SM.getFileOffset(RawToken.getLocation()) + RawToken.getLength(),
- EOL, Line, /*EnsureNewLine*/ true);
- WriteLineInfo(FileName, Line, FileType, EOL);
+ OutputContentUpTo(FromFile, NextToWrite,
+ SM.getFileOffset(RawToken.getLocation()) +
+ RawToken.getLength(),
+ LocalEOL, Line, /*EnsureNewline=*/ true);
+ WriteLineInfo(FileName, Line, FileType);
RawLex.SetKeepWhitespaceMode(false);
}
default:
@@ -513,8 +552,8 @@ bool InclusionRewriter::Process(FileID FileId,
RawLex.LexFromRawLexer(RawToken);
}
OutputContentUpTo(FromFile, NextToWrite,
- SM.getFileOffset(SM.getLocForEndOfFile(FileId)), EOL, Line,
- /*EnsureNewline*/true);
+ SM.getFileOffset(SM.getLocForEndOfFile(FileId)), LocalEOL,
+ Line, /*EnsureNewline=*/true);
return true;
}
@@ -524,7 +563,9 @@ void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS,
SourceManager &SM = PP.getSourceManager();
InclusionRewriter *Rewrite = new InclusionRewriter(PP, *OS,
Opts.ShowLineMarkers);
- PP.addPPCallbacks(Rewrite);
+ Rewrite->detectMainFileEOL();
+
+ PP.addPPCallbacks(std::unique_ptr<PPCallbacks>(Rewrite));
PP.IgnorePragmas();
// First let the preprocessor process the entire file and call callbacks.
diff --git a/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index 3e18a8b415a3..47f8189f2313 100644
--- a/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -249,27 +249,16 @@ namespace {
void HandleTranslationUnit(ASTContext &C) override;
void ReplaceStmt(Stmt *Old, Stmt *New) {
- Stmt *ReplacingStmt = ReplacedNodes[Old];
-
- if (ReplacingStmt)
- return; // We can't rewrite the same node twice.
-
- if (DisableReplaceStmt)
- return;
-
- // If replacement succeeded or warning disabled return with no warning.
- if (!Rewrite.ReplaceStmt(Old, New)) {
- ReplacedNodes[Old] = New;
- return;
- }
- if (SilenceRewriteMacroWarning)
- return;
- Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
- << Old->getSourceRange();
+ ReplaceStmtWithRange(Old, New, Old->getSourceRange());
}
void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
assert(Old != nullptr && New != nullptr && "Expected non-null Stmt's");
+
+ Stmt *ReplacingStmt = ReplacedNodes[Old];
+ if (ReplacingStmt)
+ return; // We can't rewrite the same node twice.
+
if (DisableReplaceStmt)
return;
@@ -508,7 +497,7 @@ namespace {
void GetBlockDeclRefExprs(Stmt *S);
void GetInnerBlockDeclRefExprs(Stmt *S,
SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
- llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts);
+ llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts);
// We avoid calling Type::isBlockPointerType(), since it operates on the
// canonical type. We only care if the top-level type is a closure pointer.
@@ -675,14 +664,11 @@ RewriteModernObjC::RewriteModernObjC(std::string inFile, raw_ostream* OS,
"for @try/@finally (code may not execute properly)");
}
-ASTConsumer *clang::CreateModernObjCRewriter(const std::string& InFile,
- raw_ostream* OS,
- DiagnosticsEngine &Diags,
- const LangOptions &LOpts,
- bool SilenceRewriteMacroWarning,
- bool LineInfo) {
- return new RewriteModernObjC(InFile, OS, Diags, LOpts,
- SilenceRewriteMacroWarning, LineInfo);
+std::unique_ptr<ASTConsumer> clang::CreateModernObjCRewriter(
+ const std::string &InFile, raw_ostream *OS, DiagnosticsEngine &Diags,
+ const LangOptions &LOpts, bool SilenceRewriteMacroWarning, bool LineInfo) {
+ return llvm::make_unique<RewriteModernObjC>(
+ InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning, LineInfo);
}
void RewriteModernObjC::InitializeCommon(ASTContext &context) {
@@ -4055,7 +4041,7 @@ void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
ReplaceText(LocStart, endBuf-startBuf, Result);
// Mark this struct as having been generated.
- if (!ObjCSynthesizedStructs.insert(CDecl))
+ if (!ObjCSynthesizedStructs.insert(CDecl).second)
llvm_unreachable("struct already synthesize- RewriteObjCInternalStruct");
}
@@ -4070,9 +4056,7 @@ void RewriteModernObjC::RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl,
return;
llvm::DenseSet<std::pair<const ObjCInterfaceDecl*, unsigned> > GroupSymbolOutput;
- for (llvm::SmallPtrSet<ObjCIvarDecl *, 8>::iterator i = Ivars.begin(),
- e = Ivars.end(); i != e; i++) {
- ObjCIvarDecl *IvarDecl = (*i);
+ for (ObjCIvarDecl *IvarDecl : Ivars) {
const ObjCInterfaceDecl *IDecl = IvarDecl->getContainingInterface();
unsigned GroupNo = 0;
if (IvarDecl->isBitField()) {
@@ -4256,14 +4240,12 @@ std::string RewriteModernObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
S += "(" + StructRef;
S += "*dst, " + StructRef;
S += "*src) {";
- for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
- E = ImportedBlockDecls.end(); I != E; ++I) {
- ValueDecl *VD = (*I);
+ for (ValueDecl *VD : ImportedBlockDecls) {
S += "_Block_object_assign((void*)&dst->";
- S += (*I)->getNameAsString();
+ S += VD->getNameAsString();
S += ", (void*)src->";
- S += (*I)->getNameAsString();
- if (BlockByRefDeclsPtrSet.count((*I)))
+ S += VD->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count(VD))
S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
else if (VD->getType()->isBlockPointerType())
S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
@@ -4277,12 +4259,10 @@ std::string RewriteModernObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
S += "_block_dispose_" + utostr(i);
S += "(" + StructRef;
S += "*src) {";
- for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
- E = ImportedBlockDecls.end(); I != E; ++I) {
- ValueDecl *VD = (*I);
+ for (ValueDecl *VD : ImportedBlockDecls) {
S += "_Block_object_dispose((void*)src->";
- S += (*I)->getNameAsString();
- if (BlockByRefDeclsPtrSet.count((*I)))
+ S += VD->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count(VD))
S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
else if (VD->getType()->isBlockPointerType())
S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
@@ -4583,22 +4563,18 @@ void RewriteModernObjC::GetBlockDeclRefExprs(Stmt *S) {
GetBlockDeclRefExprs(*CI);
}
// Handle specific things.
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
- if (DRE->refersToEnclosingLocal()) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S))
+ if (DRE->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DRE->getDecl()))
// FIXME: Handle enums.
- if (!isa<FunctionDecl>(DRE->getDecl()))
- BlockDeclRefs.push_back(DRE);
- if (HasLocalVariableExternalStorage(DRE->getDecl()))
- BlockDeclRefs.push_back(DRE);
- }
- }
-
+ BlockDeclRefs.push_back(DRE);
+
return;
}
void RewriteModernObjC::GetInnerBlockDeclRefExprs(Stmt *S,
SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
- llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts) {
+ llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts) {
for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI) {
if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI)) {
@@ -4615,11 +4591,11 @@ void RewriteModernObjC::GetInnerBlockDeclRefExprs(Stmt *S,
}
// Handle specific things.
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
- if (DRE->refersToEnclosingLocal()) {
- if (!isa<FunctionDecl>(DRE->getDecl()) &&
- !InnerContexts.count(DRE->getDecl()->getDeclContext()))
+ if (DRE->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DRE->getDecl())) {
+ if (!InnerContexts.count(DRE->getDecl()->getDeclContext()))
InnerBlockDeclRefs.push_back(DRE);
- if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VarDecl *Var = cast<VarDecl>(DRE->getDecl()))
if (Var->isFunctionOrMethodVarDecl())
ImportedLocalExternalDecls.insert(Var);
}
@@ -4796,7 +4772,8 @@ Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
// Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR
// for each DeclRefExp where BYREFVAR is name of the variable.
ValueDecl *VD = DeclRefExp->getDecl();
- bool isArrow = DeclRefExp->refersToEnclosingLocal();
+ bool isArrow = DeclRefExp->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DeclRefExp->getDecl());
FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
@@ -5546,6 +5523,10 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
VK_RValue, OK_Ordinary, SourceLocation());
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
+ // Put Paren around the call.
+ NewRep = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ NewRep);
+
BlockDeclRefs.clear();
BlockByRefDecls.clear();
BlockByRefDeclsPtrSet.clear();
@@ -5978,10 +5959,9 @@ void RewriteModernObjC::HandleTranslationUnit(ASTContext &C) {
// Here's a great place to add any extra declarations that may be needed.
// Write out meta data for each @protocol(<expr>).
- for (llvm::SmallPtrSet<ObjCProtocolDecl *,8>::iterator I = ProtocolExprDecls.begin(),
- E = ProtocolExprDecls.end(); I != E; ++I) {
- RewriteObjCProtocolMetaData(*I, Preamble);
- Write_ProtocolExprReferencedMetadata(Context, (*I), Preamble);
+ for (ObjCProtocolDecl *ProtDecl : ProtocolExprDecls) {
+ RewriteObjCProtocolMetaData(ProtDecl, Preamble);
+ Write_ProtocolExprReferencedMetadata(Context, ProtDecl, Preamble);
}
InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false);
@@ -7122,7 +7102,7 @@ void RewriteModernObjC::RewriteObjCProtocolMetaData(ObjCProtocolDecl *PDecl,
Result += ";\n";
// Mark this protocol as having been generated.
- if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()))
+ if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()).second)
llvm_unreachable("protocol already synthesized");
}
diff --git a/lib/Frontend/Rewrite/RewriteObjC.cpp b/lib/Frontend/Rewrite/RewriteObjC.cpp
index 7a721771759a..519681085f56 100644
--- a/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -198,27 +198,16 @@ namespace {
void HandleTranslationUnit(ASTContext &C) override;
void ReplaceStmt(Stmt *Old, Stmt *New) {
- Stmt *ReplacingStmt = ReplacedNodes[Old];
-
- if (ReplacingStmt)
- return; // We can't rewrite the same node twice.
-
- if (DisableReplaceStmt)
- return;
-
- // If replacement succeeded or warning disabled return with no warning.
- if (!Rewrite.ReplaceStmt(Old, New)) {
- ReplacedNodes[Old] = New;
- return;
- }
- if (SilenceRewriteMacroWarning)
- return;
- Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
- << Old->getSourceRange();
+ ReplaceStmtWithRange(Old, New, Old->getSourceRange());
}
void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
assert(Old != nullptr && New != nullptr && "Expected non-null Stmt's");
+
+ Stmt *ReplacingStmt = ReplacedNodes[Old];
+ if (ReplacingStmt)
+ return; // We can't rewrite the same node twice.
+
if (DisableReplaceStmt)
return;
@@ -332,7 +321,7 @@ namespace {
void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
std::string &Result);
- virtual void Initialize(ASTContext &context) override = 0;
+ void Initialize(ASTContext &context) override = 0;
// Metadata Rewriting.
virtual void RewriteMetaDataIntoBuffer(std::string &Result) = 0;
@@ -413,7 +402,7 @@ namespace {
void GetBlockDeclRefExprs(Stmt *S);
void GetInnerBlockDeclRefExprs(Stmt *S,
SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
- llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts);
+ llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts);
// We avoid calling Type::isBlockPointerType(), since it operates on the
// canonical type. We only care if the top-level type is a closure pointer.
@@ -524,7 +513,7 @@ namespace {
silenceMacroWarn) {}
~RewriteObjCFragileABI() {}
- virtual void Initialize(ASTContext &context) override;
+ void Initialize(ASTContext &context) override;
// Rewriting metadata
template<typename MethodIterator>
@@ -600,12 +589,12 @@ RewriteObjC::RewriteObjC(std::string inFile, raw_ostream* OS,
"for @try/@finally (code may not execute properly)");
}
-ASTConsumer *clang::CreateObjCRewriter(const std::string& InFile,
- raw_ostream* OS,
- DiagnosticsEngine &Diags,
- const LangOptions &LOpts,
- bool SilenceRewriteMacroWarning) {
- return new RewriteObjCFragileABI(InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning);
+std::unique_ptr<ASTConsumer>
+clang::CreateObjCRewriter(const std::string &InFile, raw_ostream *OS,
+ DiagnosticsEngine &Diags, const LangOptions &LOpts,
+ bool SilenceRewriteMacroWarning) {
+ return llvm::make_unique<RewriteObjCFragileABI>(InFile, OS, Diags, LOpts,
+ SilenceRewriteMacroWarning);
}
void RewriteObjC::InitializeCommon(ASTContext &context) {
@@ -3238,7 +3227,7 @@ void RewriteObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
ReplaceText(LocStart, endBuf-startBuf, Result);
}
// Mark this struct as having been generated.
- if (!ObjCSynthesizedStructs.insert(CDecl))
+ if (!ObjCSynthesizedStructs.insert(CDecl).second)
llvm_unreachable("struct already synthesize- SynthesizeObjCInternalStruct");
}
@@ -3382,14 +3371,12 @@ std::string RewriteObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
S += "(" + StructRef;
S += "*dst, " + StructRef;
S += "*src) {";
- for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
- E = ImportedBlockDecls.end(); I != E; ++I) {
- ValueDecl *VD = (*I);
+ for (ValueDecl *VD : ImportedBlockDecls) {
S += "_Block_object_assign((void*)&dst->";
- S += (*I)->getNameAsString();
+ S += VD->getNameAsString();
S += ", (void*)src->";
- S += (*I)->getNameAsString();
- if (BlockByRefDeclsPtrSet.count((*I)))
+ S += VD->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count(VD))
S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
else if (VD->getType()->isBlockPointerType())
S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
@@ -3403,12 +3390,10 @@ std::string RewriteObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
S += "_block_dispose_" + utostr(i);
S += "(" + StructRef;
S += "*src) {";
- for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
- E = ImportedBlockDecls.end(); I != E; ++I) {
- ValueDecl *VD = (*I);
+ for (ValueDecl *VD : ImportedBlockDecls) {
S += "_Block_object_dispose((void*)src->";
- S += (*I)->getNameAsString();
- if (BlockByRefDeclsPtrSet.count((*I)))
+ S += VD->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count(VD))
S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
else if (VD->getType()->isBlockPointerType())
S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
@@ -3686,22 +3671,18 @@ void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) {
GetBlockDeclRefExprs(*CI);
}
// Handle specific things.
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
- if (DRE->refersToEnclosingLocal()) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S))
+ if (DRE->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DRE->getDecl()))
// FIXME: Handle enums.
- if (!isa<FunctionDecl>(DRE->getDecl()))
- BlockDeclRefs.push_back(DRE);
- if (HasLocalVariableExternalStorage(DRE->getDecl()))
- BlockDeclRefs.push_back(DRE);
- }
- }
-
+ BlockDeclRefs.push_back(DRE);
+
return;
}
void RewriteObjC::GetInnerBlockDeclRefExprs(Stmt *S,
SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
- llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts) {
+ llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts) {
for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI) {
if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI)) {
@@ -3718,11 +3699,11 @@ void RewriteObjC::GetInnerBlockDeclRefExprs(Stmt *S,
}
// Handle specific things.
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
- if (DRE->refersToEnclosingLocal()) {
- if (!isa<FunctionDecl>(DRE->getDecl()) &&
- !InnerContexts.count(DRE->getDecl()->getDeclContext()))
+ if (DRE->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DRE->getDecl())) {
+ if (!InnerContexts.count(DRE->getDecl()->getDeclContext()))
InnerBlockDeclRefs.push_back(DRE);
- if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VarDecl *Var = cast<VarDecl>(DRE->getDecl()))
if (Var->isFunctionOrMethodVarDecl())
ImportedLocalExternalDecls.insert(Var);
}
@@ -3880,7 +3861,8 @@ Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
// Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR
// for each DeclRefExp where BYREFVAR is name of the variable.
ValueDecl *VD = DeclRefExp->getDecl();
- bool isArrow = DeclRefExp->refersToEnclosingLocal();
+ bool isArrow = DeclRefExp->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DeclRefExp->getDecl());
FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
@@ -4967,9 +4949,8 @@ void RewriteObjC::HandleTranslationUnit(ASTContext &C) {
// Here's a great place to add any extra declarations that may be needed.
// Write out meta data for each @protocol(<expr>).
- for (llvm::SmallPtrSet<ObjCProtocolDecl *,8>::iterator I = ProtocolExprDecls.begin(),
- E = ProtocolExprDecls.end(); I != E; ++I)
- RewriteObjCProtocolMetaData(*I, "", "", Preamble);
+ for (ObjCProtocolDecl *ProtDecl : ProtocolExprDecls)
+ RewriteObjCProtocolMetaData(ProtDecl, "", "", Preamble);
InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false);
if (ClassImplementation.size() || CategoryImplementation.size())
@@ -5276,7 +5257,7 @@ void RewriteObjCFragileABI::RewriteObjCProtocolMetaData(
Result += "};\n";
// Mark this protocol as having been generated.
- if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()))
+ if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()).second)
llvm_unreachable("protocol already synthesized");
}
@@ -5657,12 +5638,11 @@ void RewriteObjCFragileABI::RewriteMetaDataIntoBuffer(std::string &Result) {
if (ProtocolExprDecls.size()) {
Result += "#pragma section(\".objc_protocol$B\",long,read,write)\n";
Result += "#pragma data_seg(push, \".objc_protocol$B\")\n";
- for (llvm::SmallPtrSet<ObjCProtocolDecl *,8>::iterator I = ProtocolExprDecls.begin(),
- E = ProtocolExprDecls.end(); I != E; ++I) {
+ for (ObjCProtocolDecl *ProtDecl : ProtocolExprDecls) {
Result += "static struct _objc_protocol *_POINTER_OBJC_PROTOCOL_";
- Result += (*I)->getNameAsString();
+ Result += ProtDecl->getNameAsString();
Result += " = &_OBJC_PROTOCOL_";
- Result += (*I)->getNameAsString();
+ Result += ProtDecl->getNameAsString();
Result += ";\n";
}
Result += "#pragma data_seg(pop)\n\n";
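
The RewriteObjC hunks above track two LLVM 3.6 API changes: llvm::SmallPtrSet::insert() now returns a std::pair<iterator, bool> instead of a plain bool, so "already present" is tested via .second, and the sets are walked with range-based for loops instead of explicit iterators. A minimal standalone sketch of that idiom (not part of the patch; Decl is a stand-in type used only for illustration):

#include "llvm/ADT/SmallPtrSet.h"
#include <cassert>

struct Decl {}; // stand-in for the clang AST node types used above

static bool markSynthesized(llvm::SmallPtrSet<Decl *, 8> &Synthesized,
                            Decl *D) {
  // .second is true only when D was newly inserted, mirroring the
  // ObjCSynthesizedStructs/ObjCSynthesizedProtocols checks in the hunks.
  return Synthesized.insert(D).second;
}

int main() {
  llvm::SmallPtrSet<Decl *, 8> Synthesized;
  Decl D;
  assert(markSynthesized(Synthesized, &D));  // first insertion succeeds
  assert(!markSynthesized(Synthesized, &D)); // duplicate reported via .second
  for (Decl *P : Synthesized)                // range-for replaces iterator loops
    (void)P;
  return 0;
}
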
diff --git a/lib/Frontend/SerializedDiagnosticPrinter.cpp b/lib/Frontend/SerializedDiagnosticPrinter.cpp
index 29c58a8ac34e..f701f723a320 100644
--- a/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -14,6 +14,10 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Version.h"
#include "clang/Frontend/DiagnosticRenderer.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/SerializedDiagnosticReader.h"
+#include "clang/Frontend/SerializedDiagnostics.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"
@@ -86,20 +90,70 @@ protected:
void endDiagnostic(DiagOrStoredDiag D,
DiagnosticsEngine::Level Level) override;
};
-
+
+typedef llvm::DenseMap<unsigned, unsigned> AbbrevLookup;
+
+class SDiagsMerger : SerializedDiagnosticReader {
+ SDiagsWriter &Writer;
+ AbbrevLookup FileLookup;
+ AbbrevLookup CategoryLookup;
+ AbbrevLookup DiagFlagLookup;
+
+public:
+ SDiagsMerger(SDiagsWriter &Writer)
+ : SerializedDiagnosticReader(), Writer(Writer) {}
+
+ std::error_code mergeRecordsFromFile(const char *File) {
+ return readDiagnostics(File);
+ }
+
+protected:
+ std::error_code visitStartOfDiagnostic() override;
+ std::error_code visitEndOfDiagnostic() override;
+ std::error_code visitCategoryRecord(unsigned ID, StringRef Name) override;
+ std::error_code visitDiagFlagRecord(unsigned ID, StringRef Name) override;
+ std::error_code visitDiagnosticRecord(
+ unsigned Severity, const serialized_diags::Location &Location,
+ unsigned Category, unsigned Flag, StringRef Message) override;
+ std::error_code visitFilenameRecord(unsigned ID, unsigned Size,
+ unsigned Timestamp,
+ StringRef Name) override;
+ std::error_code visitFixitRecord(const serialized_diags::Location &Start,
+ const serialized_diags::Location &End,
+ StringRef CodeToInsert) override;
+ std::error_code
+ visitSourceRangeRecord(const serialized_diags::Location &Start,
+ const serialized_diags::Location &End) override;
+
+private:
+ std::error_code adjustSourceLocFilename(RecordData &Record,
+ unsigned int offset);
+
+ void adjustAbbrevID(RecordData &Record, AbbrevLookup &Lookup,
+ unsigned NewAbbrev);
+
+ void writeRecordWithAbbrev(unsigned ID, RecordData &Record);
+
+ void writeRecordWithBlob(unsigned ID, RecordData &Record, StringRef Blob);
+};
+
class SDiagsWriter : public DiagnosticConsumer {
friend class SDiagsRenderer;
+ friend class SDiagsMerger;
struct SharedState;
explicit SDiagsWriter(IntrusiveRefCntPtr<SharedState> State)
- : LangOpts(nullptr), OriginalInstance(false), State(State) {}
+ : LangOpts(nullptr), OriginalInstance(false), MergeChildRecords(false),
+ State(State) {}
public:
- SDiagsWriter(raw_ostream *os, DiagnosticOptions *diags)
- : LangOpts(nullptr), OriginalInstance(true),
- State(new SharedState(os, diags))
- {
+ SDiagsWriter(StringRef File, DiagnosticOptions *Diags, bool MergeChildRecords)
+ : LangOpts(nullptr), OriginalInstance(true),
+ MergeChildRecords(MergeChildRecords),
+ State(new SharedState(File, Diags)) {
+ if (MergeChildRecords)
+ RemoveOldDiagnostics();
EmitPreamble();
}
@@ -115,6 +169,14 @@ public:
void finish() override;
private:
+ /// \brief Build a DiagnosticsEngine to emit diagnostics about the diagnostics
+ DiagnosticsEngine *getMetaDiags();
+
+ /// \brief Remove old copies of the serialized diagnostics. This is necessary
+ /// so that we can detect when subprocesses write diagnostics that we should
+ /// merge into our own.
+ void RemoveOldDiagnostics();
+
/// \brief Emit the preamble for the serialized diagnostics.
void EmitPreamble();
@@ -152,7 +214,9 @@ private:
/// \brief Emit the string information for diagnostic flags.
unsigned getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel,
unsigned DiagID = 0);
-
+
+ unsigned getEmitDiagnosticFlag(StringRef DiagName);
+
/// \brief Emit (lazily) the file string and retrieved the file identifier.
unsigned getEmitFile(const char *Filename);
@@ -173,9 +237,6 @@ private:
void AddCharSourceRangeToRecord(CharSourceRange R, RecordDataImpl &Record,
const SourceManager &SM);
- /// \brief The version of the diagnostics file.
- enum { Version = 2 };
-
/// \brief Language options, which can differ from one clone of this client
/// to another.
const LangOptions *LangOpts;
@@ -184,11 +245,16 @@ private:
/// clones), responsible for writing the file at the end.
bool OriginalInstance;
+ /// \brief Whether this instance should aggregate diagnostics that are
+ /// generated from child processes.
+ bool MergeChildRecords;
+
/// \brief State that is shared among the various clones of this diagnostic
/// consumer.
struct SharedState : RefCountedBase<SharedState> {
- SharedState(raw_ostream *os, DiagnosticOptions *diags)
- : DiagOpts(diags), Stream(Buffer), OS(os), EmittedAnyDiagBlocks(false) { }
+ SharedState(StringRef File, DiagnosticOptions *Diags)
+ : DiagOpts(Diags), Stream(Buffer), OutputFile(File.str()),
+ EmittedAnyDiagBlocks(false) {}
/// \brief Diagnostic options.
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
@@ -200,7 +266,7 @@ private:
llvm::BitstreamWriter Stream;
/// \brief The name of the diagnostics file.
- std::unique_ptr<raw_ostream> OS;
+ std::string OutputFile;
/// \brief The set of constructed record abbreviations.
AbbreviationMap Abbrevs;
@@ -227,6 +293,9 @@ private:
/// this becomes \c true, we never close a DIAG block until we know that we're
/// starting another one or we're done.
bool EmittedAnyDiagBlocks;
+
+ /// \brief Engine for emitting diagnostics about the diagnostics.
+ std::unique_ptr<DiagnosticsEngine> MetaDiagnostics;
};
/// \brief State shared among the various clones of this diagnostic consumer.
@@ -236,9 +305,11 @@ private:
namespace clang {
namespace serialized_diags {
-DiagnosticConsumer *create(raw_ostream *OS, DiagnosticOptions *diags) {
- return new SDiagsWriter(OS, diags);
+std::unique_ptr<DiagnosticConsumer>
+create(StringRef OutputFile, DiagnosticOptions *Diags, bool MergeChildRecords) {
+ return llvm::make_unique<SDiagsWriter>(OutputFile, Diags, MergeChildRecords);
}
+
} // end namespace serialized_diags
} // end namespace clang
@@ -465,17 +536,15 @@ void SDiagsWriter::EmitMetaBlock() {
Stream.EnterSubblock(BLOCK_META, 3);
Record.clear();
Record.push_back(RECORD_VERSION);
- Record.push_back(Version);
+ Record.push_back(VersionNumber);
Stream.EmitRecordWithAbbrev(Abbrevs.get(RECORD_VERSION), Record);
Stream.ExitBlock();
}
unsigned SDiagsWriter::getEmitCategory(unsigned int category) {
- if (State->Categories.count(category))
+ if (!State->Categories.insert(category).second)
return category;
-
- State->Categories.insert(category);
-
+
// We use a local version of 'Record' so that we can be generating
// another record when we lazily generate one for the category entry.
RecordData Record;
@@ -495,6 +564,10 @@ unsigned SDiagsWriter::getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel,
return 0; // No flag for notes.
StringRef FlagName = DiagnosticIDs::getWarningOptionForDiag(DiagID);
+ return getEmitDiagnosticFlag(FlagName);
+}
+
+unsigned SDiagsWriter::getEmitDiagnosticFlag(StringRef FlagName) {
if (FlagName.empty())
return 0;
@@ -689,6 +762,40 @@ void SDiagsRenderer::emitNote(SourceLocation Loc, StringRef Message,
Writer.ExitDiagBlock();
}
+DiagnosticsEngine *SDiagsWriter::getMetaDiags() {
+ // FIXME: It's slightly absurd to create a new diagnostics engine here, but
+ // the other options that are available today are worse:
+ //
+ // 1. Teach DiagnosticsConsumers to emit diagnostics to the engine they are a
+ // part of. The DiagnosticsEngine would need to know not to send
+ // diagnostics back to the consumer that failed. This would require us to
+ // rework ChainedDiagnosticsConsumer and teach the engine about multiple
+ // consumers, which is difficult today because most APIs interface with
+ // consumers rather than the engine itself.
+ //
+ // 2. Pass a DiagnosticsEngine to SDiagsWriter on creation - this would need
+ // to be distinct from the engine the writer was being added to and would
+ // normally not be used.
+ if (!State->MetaDiagnostics) {
+ IntrusiveRefCntPtr<DiagnosticIDs> IDs(new DiagnosticIDs());
+ auto Client =
+ new TextDiagnosticPrinter(llvm::errs(), State->DiagOpts.get());
+ State->MetaDiagnostics = llvm::make_unique<DiagnosticsEngine>(
+ IDs, State->DiagOpts.get(), Client);
+ }
+ return State->MetaDiagnostics.get();
+}
+
+void SDiagsWriter::RemoveOldDiagnostics() {
+ if (!llvm::sys::fs::remove(State->OutputFile))
+ return;
+
+ getMetaDiags()->Report(diag::warn_fe_serialized_diag_merge_failure);
+ // Disable merging child records, as whatever is in this file may be
+ // misleading.
+ MergeChildRecords = false;
+}
+
void SDiagsWriter::finish() {
// The original instance is responsible for writing the file.
if (!OriginalInstance)
@@ -698,9 +805,113 @@ void SDiagsWriter::finish() {
if (State->EmittedAnyDiagBlocks)
ExitDiagBlock();
+ if (MergeChildRecords) {
+ if (!State->EmittedAnyDiagBlocks)
+ // We have no diagnostics of our own, so we can just leave the child
+ // process' output alone
+ return;
+
+ if (llvm::sys::fs::exists(State->OutputFile))
+ if (SDiagsMerger(*this).mergeRecordsFromFile(State->OutputFile.c_str()))
+ getMetaDiags()->Report(diag::warn_fe_serialized_diag_merge_failure);
+ }
+
+ std::error_code EC;
+ auto OS = llvm::make_unique<llvm::raw_fd_ostream>(State->OutputFile.c_str(),
+ EC, llvm::sys::fs::F_None);
+ if (EC) {
+ getMetaDiags()->Report(diag::warn_fe_serialized_diag_failure)
+ << State->OutputFile << EC.message();
+ return;
+ }
+
// Write the generated bitstream to "Out".
- State->OS->write((char *)&State->Buffer.front(), State->Buffer.size());
- State->OS->flush();
+ OS->write((char *)&State->Buffer.front(), State->Buffer.size());
+ OS->flush();
+}
+
+std::error_code SDiagsMerger::visitStartOfDiagnostic() {
+ Writer.EnterDiagBlock();
+ return std::error_code();
+}
+
+std::error_code SDiagsMerger::visitEndOfDiagnostic() {
+ Writer.ExitDiagBlock();
+ return std::error_code();
+}
+
+std::error_code
+SDiagsMerger::visitSourceRangeRecord(const serialized_diags::Location &Start,
+ const serialized_diags::Location &End) {
+ RecordData Record;
+ Record.push_back(RECORD_SOURCE_RANGE);
+ Record.push_back(FileLookup[Start.FileID]);
+ Record.push_back(Start.Line);
+ Record.push_back(Start.Col);
+ Record.push_back(Start.Offset);
+ Record.push_back(FileLookup[End.FileID]);
+ Record.push_back(End.Line);
+ Record.push_back(End.Col);
+ Record.push_back(End.Offset);
+
+ Writer.State->Stream.EmitRecordWithAbbrev(
+ Writer.State->Abbrevs.get(RECORD_SOURCE_RANGE), Record);
+ return std::error_code();
+}
+
+std::error_code SDiagsMerger::visitDiagnosticRecord(
+ unsigned Severity, const serialized_diags::Location &Location,
+ unsigned Category, unsigned Flag, StringRef Message) {
+ RecordData MergedRecord;
+ MergedRecord.push_back(RECORD_DIAG);
+ MergedRecord.push_back(Severity);
+ MergedRecord.push_back(FileLookup[Location.FileID]);
+ MergedRecord.push_back(Location.Line);
+ MergedRecord.push_back(Location.Col);
+ MergedRecord.push_back(Location.Offset);
+ MergedRecord.push_back(CategoryLookup[Category]);
+ MergedRecord.push_back(Flag ? DiagFlagLookup[Flag] : 0);
+ MergedRecord.push_back(Message.size());
+
+ Writer.State->Stream.EmitRecordWithBlob(
+ Writer.State->Abbrevs.get(RECORD_DIAG), MergedRecord, Message);
+ return std::error_code();
+}
+
+std::error_code
+SDiagsMerger::visitFixitRecord(const serialized_diags::Location &Start,
+ const serialized_diags::Location &End,
+ StringRef Text) {
+ RecordData Record;
+ Record.push_back(RECORD_FIXIT);
+ Record.push_back(FileLookup[Start.FileID]);
+ Record.push_back(Start.Line);
+ Record.push_back(Start.Col);
+ Record.push_back(Start.Offset);
+ Record.push_back(FileLookup[End.FileID]);
+ Record.push_back(End.Line);
+ Record.push_back(End.Col);
+ Record.push_back(End.Offset);
+ Record.push_back(Text.size());
+
+ Writer.State->Stream.EmitRecordWithBlob(
+ Writer.State->Abbrevs.get(RECORD_FIXIT), Record, Text);
+ return std::error_code();
+}
+
+std::error_code SDiagsMerger::visitFilenameRecord(unsigned ID, unsigned Size,
+ unsigned Timestamp,
+ StringRef Name) {
+ FileLookup[ID] = Writer.getEmitFile(Name.str().c_str());
+ return std::error_code();
+}
+
+std::error_code SDiagsMerger::visitCategoryRecord(unsigned ID, StringRef Name) {
+ CategoryLookup[ID] = Writer.getEmitCategory(ID);
+ return std::error_code();
+}
- State->OS.reset();
+std::error_code SDiagsMerger::visitDiagFlagRecord(unsigned ID, StringRef Name) {
+ DiagFlagLookup[ID] = Writer.getEmitDiagnosticFlag(Name);
+ return std::error_code();
}
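
The SDiagsWriter changes above replace the raw_ostream-based constructor with an output file name plus a MergeChildRecords flag: when merging is requested, any stale output file is removed up front, and finish() later folds records written by child processes back into the parent's bitstream through SDiagsMerger. A hedged sketch of how a caller might install the new consumer; the file name and ownership choice are illustrative assumptions, while the create() signature is the one added above:

#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Frontend/SerializedDiagnosticPrinter.h"
#include <memory>

void attachSerializedDiags(clang::DiagnosticsEngine &Diags,
                           clang::DiagnosticOptions *DiagOpts) {
  // With MergeChildRecords == true the writer deletes an existing
  // "example.dia" in its constructor and merges child output in finish().
  std::unique_ptr<clang::DiagnosticConsumer> Consumer =
      clang::serialized_diags::create("example.dia", DiagOpts,
                                      /*MergeChildRecords=*/true);
  Diags.setClient(Consumer.release(), /*ShouldOwnClient=*/true);
}
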
diff --git a/lib/Frontend/SerializedDiagnosticReader.cpp b/lib/Frontend/SerializedDiagnosticReader.cpp
new file mode 100644
index 000000000000..0ebbd22af274
--- /dev/null
+++ b/lib/Frontend/SerializedDiagnosticReader.cpp
@@ -0,0 +1,295 @@
+//===--- SerializedDiagnosticReader.cpp - Reads diagnostics ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/SerializedDiagnosticReader.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/SerializedDiagnostics.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace clang;
+using namespace clang::serialized_diags;
+
+std::error_code SerializedDiagnosticReader::readDiagnostics(StringRef File) {
+ // Open the diagnostics file.
+ FileSystemOptions FO;
+ FileManager FileMgr(FO);
+
+ auto Buffer = FileMgr.getBufferForFile(File);
+ if (!Buffer)
+ return SDError::CouldNotLoad;
+
+ llvm::BitstreamReader StreamFile;
+ StreamFile.init((const unsigned char *)(*Buffer)->getBufferStart(),
+ (const unsigned char *)(*Buffer)->getBufferEnd());
+
+ llvm::BitstreamCursor Stream(StreamFile);
+
+ // Sniff for the signature.
+ if (Stream.Read(8) != 'D' ||
+ Stream.Read(8) != 'I' ||
+ Stream.Read(8) != 'A' ||
+ Stream.Read(8) != 'G')
+ return SDError::InvalidSignature;
+
+ // Read the top level blocks.
+ while (!Stream.AtEndOfStream()) {
+ if (Stream.ReadCode() != llvm::bitc::ENTER_SUBBLOCK)
+ return SDError::InvalidDiagnostics;
+
+ std::error_code EC;
+ switch (Stream.ReadSubBlockID()) {
+ case llvm::bitc::BLOCKINFO_BLOCK_ID:
+ if (Stream.ReadBlockInfoBlock())
+ return SDError::MalformedBlockInfoBlock;
+ continue;
+ case BLOCK_META:
+ if ((EC = readMetaBlock(Stream)))
+ return EC;
+ continue;
+ case BLOCK_DIAG:
+ if ((EC = readDiagnosticBlock(Stream)))
+ return EC;
+ continue;
+ default:
+ if (!Stream.SkipBlock())
+ return SDError::MalformedTopLevelBlock;
+ continue;
+ }
+ }
+ return std::error_code();
+}
+
+enum class SerializedDiagnosticReader::Cursor {
+ Record = 1,
+ BlockEnd,
+ BlockBegin
+};
+
+llvm::ErrorOr<SerializedDiagnosticReader::Cursor>
+SerializedDiagnosticReader::skipUntilRecordOrBlock(
+ llvm::BitstreamCursor &Stream, unsigned &BlockOrRecordID) {
+ BlockOrRecordID = 0;
+
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+
+ switch ((llvm::bitc::FixedAbbrevIDs)Code) {
+ case llvm::bitc::ENTER_SUBBLOCK:
+ BlockOrRecordID = Stream.ReadSubBlockID();
+ return Cursor::BlockBegin;
+
+ case llvm::bitc::END_BLOCK:
+ if (Stream.ReadBlockEnd())
+ return SDError::InvalidDiagnostics;
+ return Cursor::BlockEnd;
+
+ case llvm::bitc::DEFINE_ABBREV:
+ Stream.ReadAbbrevRecord();
+ continue;
+
+ case llvm::bitc::UNABBREV_RECORD:
+ return SDError::UnsupportedConstruct;
+
+ default:
+ // We found a record.
+ BlockOrRecordID = Code;
+ return Cursor::Record;
+ }
+ }
+
+ return SDError::InvalidDiagnostics;
+}
+
+std::error_code
+SerializedDiagnosticReader::readMetaBlock(llvm::BitstreamCursor &Stream) {
+ if (Stream.EnterSubBlock(clang::serialized_diags::BLOCK_META))
+ return SDError::MalformedMetadataBlock;
+
+ bool VersionChecked = false;
+
+ while (true) {
+ unsigned BlockOrCode = 0;
+ llvm::ErrorOr<Cursor> Res = skipUntilRecordOrBlock(Stream, BlockOrCode);
+    if (!Res)
+      return Res.getError();
+
+ switch (Res.get()) {
+ case Cursor::Record:
+ break;
+ case Cursor::BlockBegin:
+ if (Stream.SkipBlock())
+ return SDError::MalformedMetadataBlock;
+ case Cursor::BlockEnd:
+ if (!VersionChecked)
+ return SDError::MissingVersion;
+ return std::error_code();
+ }
+
+ SmallVector<uint64_t, 1> Record;
+ unsigned RecordID = Stream.readRecord(BlockOrCode, Record);
+
+ if (RecordID == RECORD_VERSION) {
+ if (Record.size() < 1)
+ return SDError::MissingVersion;
+ if (Record[0] > VersionNumber)
+ return SDError::VersionMismatch;
+ VersionChecked = true;
+ }
+ }
+}
+
+std::error_code
+SerializedDiagnosticReader::readDiagnosticBlock(llvm::BitstreamCursor &Stream) {
+ if (Stream.EnterSubBlock(clang::serialized_diags::BLOCK_DIAG))
+ return SDError::MalformedDiagnosticBlock;
+
+ std::error_code EC;
+ if ((EC = visitStartOfDiagnostic()))
+ return EC;
+
+ SmallVector<uint64_t, 16> Record;
+ while (true) {
+ unsigned BlockOrCode = 0;
+ llvm::ErrorOr<Cursor> Res = skipUntilRecordOrBlock(Stream, BlockOrCode);
+    if (!Res)
+      return Res.getError();
+
+ switch (Res.get()) {
+ case Cursor::BlockBegin:
+ // The only blocks we care about are subdiagnostics.
+ if (BlockOrCode == serialized_diags::BLOCK_DIAG) {
+ if ((EC = readDiagnosticBlock(Stream)))
+ return EC;
+ } else if (!Stream.SkipBlock())
+ return SDError::MalformedSubBlock;
+ continue;
+ case Cursor::BlockEnd:
+ if ((EC = visitEndOfDiagnostic()))
+ return EC;
+ return std::error_code();
+ case Cursor::Record:
+ break;
+ }
+
+ // Read the record.
+ Record.clear();
+ StringRef Blob;
+ unsigned RecID = Stream.readRecord(BlockOrCode, Record, &Blob);
+
+ if (RecID < serialized_diags::RECORD_FIRST ||
+ RecID > serialized_diags::RECORD_LAST)
+ continue;
+
+ switch ((RecordIDs)RecID) {
+ case RECORD_CATEGORY:
+ // A category has ID and name size.
+ if (Record.size() != 2)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitCategoryRecord(Record[0], Blob)))
+ return EC;
+ continue;
+ case RECORD_DIAG:
+ // A diagnostic has severity, location (4), category, flag, and message
+ // size.
+ if (Record.size() != 8)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitDiagnosticRecord(
+ Record[0], Location(Record[1], Record[2], Record[3], Record[4]),
+ Record[5], Record[6], Blob)))
+ return EC;
+ continue;
+ case RECORD_DIAG_FLAG:
+ // A diagnostic flag has ID and name size.
+ if (Record.size() != 2)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitDiagFlagRecord(Record[0], Blob)))
+ return EC;
+ continue;
+ case RECORD_FILENAME:
+ // A filename has ID, size, timestamp, and name size. The size and
+ // timestamp are legacy fields that are always zero these days.
+ if (Record.size() != 4)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitFilenameRecord(Record[0], Record[1], Record[2], Blob)))
+ return EC;
+ continue;
+ case RECORD_FIXIT:
+ // A fixit has two locations (4 each) and message size.
+ if (Record.size() != 9)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitFixitRecord(
+ Location(Record[0], Record[1], Record[2], Record[3]),
+ Location(Record[4], Record[5], Record[6], Record[7]), Blob)))
+ return EC;
+ continue;
+ case RECORD_SOURCE_RANGE:
+ // A source range is two locations (4 each).
+ if (Record.size() != 8)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitSourceRangeRecord(
+ Location(Record[0], Record[1], Record[2], Record[3]),
+ Location(Record[4], Record[5], Record[6], Record[7]))))
+ return EC;
+ continue;
+ case RECORD_VERSION:
+ // A version is just a number.
+ if (Record.size() != 1)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitVersionRecord(Record[0])))
+ return EC;
+ continue;
+ }
+ }
+}
+
+namespace {
+class SDErrorCategoryType final : public std::error_category {
+ const char *name() const LLVM_NOEXCEPT override {
+ return "clang.serialized_diags";
+ }
+ std::string message(int IE) const override {
+ SDError E = static_cast<SDError>(IE);
+ switch (E) {
+ case SDError::CouldNotLoad:
+ return "Failed to open diagnostics file";
+ case SDError::InvalidSignature:
+ return "Invalid diagnostics signature";
+ case SDError::InvalidDiagnostics:
+ return "Parse error reading diagnostics";
+ case SDError::MalformedTopLevelBlock:
+ return "Malformed block at top-level of diagnostics";
+ case SDError::MalformedSubBlock:
+ return "Malformed sub-block in a diagnostic";
+ case SDError::MalformedBlockInfoBlock:
+ return "Malformed BlockInfo block";
+ case SDError::MalformedMetadataBlock:
+ return "Malformed Metadata block";
+ case SDError::MalformedDiagnosticBlock:
+ return "Malformed Diagnostic block";
+ case SDError::MalformedDiagnosticRecord:
+ return "Malformed Diagnostic record";
+ case SDError::MissingVersion:
+ return "No version provided in diagnostics";
+ case SDError::VersionMismatch:
+ return "Unsupported diagnostics version";
+ case SDError::UnsupportedConstruct:
+ return "Bitcode constructs that are not supported in diagnostics appear";
+ case SDError::HandlerFailed:
+ return "Generic error occurred while handling a record";
+ }
+ llvm_unreachable("Unknown error type!");
+ }
+};
+}
+
+static llvm::ManagedStatic<SDErrorCategoryType> ErrorCategory;
+const std::error_category &clang::serialized_diags::SDErrorCategory() {
+ return *ErrorCategory;
+}
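
SerializedDiagnosticReader is a small visitor over the .dia bitstream: readDiagnostics() walks the META and DIAG blocks, validates each record's arity as shown above, and dispatches to the visit*Record hooks. A minimal illustrative subclass (not part of the patch) that only counts diagnostic records, assuming, as the selective overrides in SDiagsMerger suggest, that unimplemented hooks default to no-ops:

#include "clang/Frontend/SerializedDiagnosticReader.h"
#include "llvm/ADT/StringRef.h"
#include <system_error>

namespace {
class DiagCounter
    : public clang::serialized_diags::SerializedDiagnosticReader {
public:
  unsigned Count = 0;

  std::error_code countFile(llvm::StringRef File) {
    return readDiagnostics(File); // same entry point SDiagsMerger uses
  }

protected:
  std::error_code visitDiagnosticRecord(
      unsigned Severity, const clang::serialized_diags::Location &Location,
      unsigned Category, unsigned Flag, llvm::StringRef Message) override {
    ++Count; // one RECORD_DIAG per diagnostic or note
    return std::error_code();
  }
};
} // end anonymous namespace

// Returns the number of diagnostics in File, or -1 on any SDError.
int countSerializedDiags(llvm::StringRef File) {
  DiagCounter Counter;
  if (Counter.countFile(File))
    return -1;
  return static_cast<int>(Counter.Count);
}
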
diff --git a/lib/Frontend/TextDiagnostic.cpp b/lib/Frontend/TextDiagnostic.cpp
index 6e6f3dd1bfee..bbc99141f072 100644
--- a/lib/Frontend/TextDiagnostic.cpp
+++ b/lib/Frontend/TextDiagnostic.cpp
@@ -176,7 +176,7 @@ static void expandTabs(std::string &SourceLine, unsigned TabStop) {
/// of the printable representation of the line to the columns those printable
/// characters will appear at (numbering the first column as 0).
///
-/// If a byte 'i' corresponds to muliple columns (e.g. the byte contains a tab
+/// If a byte 'i' corresponds to multiple columns (e.g. the byte contains a tab
/// character) then the array will map that byte to the first column the
/// tab appears at and the next value in the map will have been incremented
/// more than once.
@@ -293,14 +293,14 @@ struct SourceColumnMap {
/// \brief Map from a byte index to the next byte which starts a column.
int startOfNextColumn(int N) const {
- assert(0 <= N && N < static_cast<int>(m_columnToByte.size() - 1));
+ assert(0 <= N && N < static_cast<int>(m_byteToColumn.size() - 1));
while (byteToColumn(++N) == -1) {}
return N;
}
/// \brief Map from a byte index to the previous byte which starts a column.
int startOfPreviousColumn(int N) const {
- assert(0 < N && N < static_cast<int>(m_columnToByte.size()));
+ assert(0 < N && N < static_cast<int>(m_byteToColumn.size()));
while (byteToColumn(--N) == -1) {}
return N;
}
@@ -323,9 +323,10 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
std::string &FixItInsertionLine,
unsigned Columns,
const SourceColumnMap &map) {
- unsigned MaxColumns = std::max<unsigned>(map.columns(),
- std::max(CaretLine.size(),
- FixItInsertionLine.size()));
+ unsigned CaretColumns = CaretLine.size();
+ unsigned FixItColumns = llvm::sys::locale::columnWidth(FixItInsertionLine);
+ unsigned MaxColumns = std::max(static_cast<unsigned>(map.columns()),
+ std::max(CaretColumns, FixItColumns));
// if the number of columns is less than the desired number we're done
if (MaxColumns <= Columns)
return;
@@ -487,7 +488,7 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
// We checked up front that the line needed truncation
assert(FrontColumnsRemoved+ColumnsKept+BackColumnsRemoved > Columns);
- // The line needs some trunctiona, and we'd prefer to keep the front
+ // The line needs some truncation, and we'd prefer to keep the front
// if possible, so remove the back
if (BackColumnsRemoved > strlen(back_ellipse))
SourceLine.replace(SourceEnd, std::string::npos, back_ellipse);
@@ -1110,12 +1111,13 @@ void TextDiagnostic::emitSnippetAndCaret(
// Copy the line of code into an std::string for ease of manipulation.
std::string SourceLine(LineStart, LineEnd);
- // Create a line for the caret that is filled with spaces that is the same
- // length as the line of source code.
- std::string CaretLine(LineEnd-LineStart, ' ');
-
+ // Build the byte to column map.
const SourceColumnMap sourceColMap(SourceLine, DiagOpts->TabStop);
+ // Create a line for the caret that is filled with spaces that is the same
+ // number of columns as the line of source code.
+ std::string CaretLine(sourceColMap.columns(), ' ');
+
// Highlight all of the characters covered by Ranges with ~ characters.
for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
E = Ranges.end();
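
The TextDiagnostic change above fixes a bytes-versus-columns mix-up: the caret line is now sized from the source column map, and the fix-it line is measured with llvm::sys::locale::columnWidth() rather than std::string::size(), which matters as soon as a line contains tabs or multi-byte UTF-8. A tiny standalone illustration of the difference (the sample string is an assumption):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // "fu:r" with a u-umlaut is four bytes of UTF-8 but three terminal columns.
  llvm::StringRef FixIt = "f\xc3\xbc" "r";
  llvm::outs() << "bytes:   " << FixIt.size() << "\n";                          // 4
  llvm::outs() << "columns: " << llvm::sys::locale::columnWidth(FixIt) << "\n"; // 3
  return 0;
}
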
diff --git a/lib/Frontend/VerifyDiagnosticConsumer.cpp b/lib/Frontend/VerifyDiagnosticConsumer.cpp
index b50950e9b300..3ff6b18e2199 100644
--- a/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -27,14 +27,13 @@ typedef VerifyDiagnosticConsumer::Directive Directive;
typedef VerifyDiagnosticConsumer::DirectiveList DirectiveList;
typedef VerifyDiagnosticConsumer::ExpectedData ExpectedData;
-VerifyDiagnosticConsumer::VerifyDiagnosticConsumer(DiagnosticsEngine &_Diags)
- : Diags(_Diags),
- PrimaryClient(Diags.getClient()), OwnsPrimaryClient(Diags.ownsClient()),
+VerifyDiagnosticConsumer::VerifyDiagnosticConsumer(DiagnosticsEngine &Diags_)
+ : Diags(Diags_),
+ PrimaryClient(Diags.getClient()), PrimaryClientOwner(Diags.takeClient()),
Buffer(new TextDiagnosticBuffer()), CurrentPreprocessor(nullptr),
LangOpts(nullptr), SrcManager(nullptr), ActiveSourceFiles(0),
Status(HasNoDirectives)
{
- Diags.takeClient();
if (Diags.hasSourceManager())
setSourceManager(Diags.getSourceManager());
}
@@ -43,10 +42,8 @@ VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
assert(!ActiveSourceFiles && "Incomplete parsing of source files!");
assert(!CurrentPreprocessor && "CurrentPreprocessor should be invalid!");
SrcManager = nullptr;
- CheckDiagnostics();
- Diags.takeClient();
- if (OwnsPrimaryClient)
- delete PrimaryClient;
+ CheckDiagnostics();
+ Diags.takeClient().release();
}
#ifndef NDEBUG
@@ -84,8 +81,8 @@ void VerifyDiagnosticConsumer::BeginSourceFile(const LangOptions &LangOpts,
const_cast<Preprocessor*>(PP)->addCommentHandler(this);
#ifndef NDEBUG
// Debug build tracks parsed files.
- VerifyFileTracker *V = new VerifyFileTracker(*this, *SrcManager);
- const_cast<Preprocessor*>(PP)->addPPCallbacks(V);
+ const_cast<Preprocessor*>(PP)->addPPCallbacks(
+ llvm::make_unique<VerifyFileTracker>(*this, *SrcManager));
#endif
}
}
@@ -402,8 +399,9 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
// Lookup file via Preprocessor, like a #include.
const DirectoryLookup *CurDir;
- const FileEntry *FE = PP->LookupFile(Pos, Filename, false, nullptr,
- CurDir, nullptr, nullptr, nullptr);
+ const FileEntry *FE =
+ PP->LookupFile(Pos, Filename, false, nullptr, nullptr, CurDir,
+ nullptr, nullptr, nullptr);
if (!FE) {
Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
diag::err_verify_missing_file) << Filename << KindStr;
@@ -502,13 +500,12 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
}
// Construct new directive.
- std::unique_ptr<Directive> D(
- Directive::create(RegexKind, Pos, ExpectedLoc, MatchAnyLine, Text,
- Min, Max));
+ std::unique_ptr<Directive> D = Directive::create(
+ RegexKind, Pos, ExpectedLoc, MatchAnyLine, Text, Min, Max);
std::string Error;
if (D->isValid(Error)) {
- DL->push_back(D.release());
+ DL->push_back(std::move(D));
FoundDirective = true;
} else {
Diags.Report(Pos.getLocWithOffset(ContentBegin-PH.Begin),
@@ -644,15 +641,16 @@ static unsigned PrintUnexpected(DiagnosticsEngine &Diags, SourceManager *SourceM
/// \brief Takes a list of diagnostics that were expected to have been generated
/// but were not and produces a diagnostic to the user from this.
-static unsigned PrintExpected(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
- DirectiveList &DL, const char *Kind) {
+static unsigned PrintExpected(DiagnosticsEngine &Diags,
+ SourceManager &SourceMgr,
+ std::vector<Directive *> &DL, const char *Kind) {
if (DL.empty())
return 0;
SmallString<256> Fmt;
llvm::raw_svector_ostream OS(Fmt);
- for (DirectiveList::iterator I = DL.begin(), E = DL.end(); I != E; ++I) {
- Directive &D = **I;
+ for (auto *DirPtr : DL) {
+ Directive &D = *DirPtr;
OS << "\n File " << SourceMgr.getFilename(D.DiagnosticLoc);
if (D.MatchAnyLine)
OS << " Line *";
@@ -694,11 +692,11 @@ static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
DirectiveList &Left,
const_diag_iterator d2_begin,
const_diag_iterator d2_end) {
- DirectiveList LeftOnly;
+ std::vector<Directive *> LeftOnly;
DiagList Right(d2_begin, d2_end);
- for (DirectiveList::iterator I = Left.begin(), E = Left.end(); I != E; ++I) {
- Directive& D = **I;
+ for (auto &Owner : Left) {
+ Directive &D = *Owner;
unsigned LineNo1 = SourceMgr.getPresumedLineNumber(D.DiagnosticLoc);
for (unsigned i = 0; i < D.Max; ++i) {
@@ -720,7 +718,7 @@ static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
if (II == IE) {
// Not found.
if (i >= D.Min) break;
- LeftOnly.push_back(*I);
+ LeftOnly.push_back(&D);
} else {
// Found. The same cannot be found twice.
Right.erase(II);
@@ -801,8 +799,8 @@ void VerifyDiagnosticConsumer::UpdateParsedFileStatus(SourceManager &SM,
void VerifyDiagnosticConsumer::CheckDiagnostics() {
// Ensure any diagnostics go to the primary client.
- bool OwnsCurClient = Diags.ownsClient();
- DiagnosticConsumer *CurClient = Diags.takeClient();
+ DiagnosticConsumer *CurClient = Diags.getClient();
+ std::unique_ptr<DiagnosticConsumer> Owner = Diags.takeClient();
Diags.setClient(PrimaryClient, false);
#ifndef NDEBUG
@@ -864,20 +862,21 @@ void VerifyDiagnosticConsumer::CheckDiagnostics() {
Buffer->note_end(), "note"));
}
- Diags.takeClient();
- Diags.setClient(CurClient, OwnsCurClient);
+ Diags.setClient(CurClient, Owner.release() != nullptr);
// Reset the buffer, we have processed all the diagnostics in it.
Buffer.reset(new TextDiagnosticBuffer());
ED.Reset();
}
-Directive *Directive::create(bool RegexKind, SourceLocation DirectiveLoc,
- SourceLocation DiagnosticLoc, bool MatchAnyLine,
- StringRef Text, unsigned Min, unsigned Max) {
+std::unique_ptr<Directive> Directive::create(bool RegexKind,
+ SourceLocation DirectiveLoc,
+ SourceLocation DiagnosticLoc,
+ bool MatchAnyLine, StringRef Text,
+ unsigned Min, unsigned Max) {
if (!RegexKind)
- return new StandardDirective(DirectiveLoc, DiagnosticLoc, MatchAnyLine,
- Text, Min, Max);
+ return llvm::make_unique<StandardDirective>(DirectiveLoc, DiagnosticLoc,
+ MatchAnyLine, Text, Min, Max);
// Parse the directive into a regular expression.
std::string RegexStr;
@@ -902,6 +901,6 @@ Directive *Directive::create(bool RegexKind, SourceLocation DirectiveLoc,
}
}
- return new RegexDirective(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text,
- Min, Max, RegexStr);
+ return llvm::make_unique<RegexDirective>(
+ DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max, RegexStr);
}
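
The VerifyDiagnosticConsumer changes follow a clang 3.6 API shift: DiagnosticsEngine::takeClient() now returns a std::unique_ptr<DiagnosticConsumer>, so client ownership travels with the pointer instead of a separate ownsClient() flag. A hedged sketch of the resulting swap-and-restore idiom (the temporary consumer here is illustrative):

#include "clang/Basic/Diagnostic.h"
#include <memory>

void withTemporaryClient(clang::DiagnosticsEngine &Diags,
                         clang::DiagnosticConsumer &Temporary) {
  clang::DiagnosticConsumer *Previous = Diags.getClient();
  std::unique_ptr<clang::DiagnosticConsumer> PreviousOwner = Diags.takeClient();

  Diags.setClient(&Temporary, /*ShouldOwnClient=*/false);
  // ... route some diagnostics through Temporary ...

  // Hand ownership back only if the engine owned the client before, exactly
  // as CheckDiagnostics() does above.
  Diags.setClient(Previous,
                  /*ShouldOwnClient=*/PreviousOwner.release() != nullptr);
}
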
diff --git a/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index de864f653562..79cf0049a7b2 100644
--- a/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -221,6 +221,6 @@ bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
return false;
bool Success = Clang->ExecuteAction(*Act);
if (Clang->getFrontendOpts().DisableFree)
- BuryPointer(Act.release());
+ BuryPointer(std::move(Act));
return Success;
}
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
index edee7d764250..080550f7c77f 100644
--- a/lib/Headers/CMakeLists.txt
+++ b/lib/Headers/CMakeLists.txt
@@ -1,11 +1,18 @@
set(files
+ adxintrin.h
altivec.h
ammintrin.h
arm_acle.h
- avxintrin.h
avx2intrin.h
- bmiintrin.h
+ avx512bwintrin.h
+ avx512erintrin.h
+ avx512fintrin.h
+ avx512vlbwintrin.h
+ avx512vlintrin.h
+ avxintrin.h
bmi2intrin.h
+ bmiintrin.h
+ cpuid.h
emmintrin.h
f16cintrin.h
float.h
@@ -13,13 +20,14 @@ set(files
fmaintrin.h
ia32intrin.h
immintrin.h
- iso646.h
Intrin.h
+ iso646.h
limits.h
lzcntintrin.h
mm3dnow.h
mmintrin.h
mm_malloc.h
+ module.modulemap
nmmintrin.h
pmmintrin.h
popcntintrin.h
@@ -30,23 +38,24 @@ set(files
smmintrin.h
stdalign.h
stdarg.h
+ stdatomic.h
stdbool.h
stddef.h
+ __stddef_max_align_t.h
stdint.h
stdnoreturn.h
tbmintrin.h
tgmath.h
tmmintrin.h
+ unwind.h
+ vadefs.h
varargs.h
- wmmintrin.h
__wmmintrin_aes.h
+ wmmintrin.h
__wmmintrin_pclmul.h
x86intrin.h
xmmintrin.h
xopintrin.h
- cpuid.h
- unwind.h
- module.modulemap
)
set(output_dir ${LLVM_LIBRARY_OUTPUT_INTDIR}/clang/${CLANG_VERSION}/include)
diff --git a/lib/Headers/Intrin.h b/lib/Headers/Intrin.h
index 13e105ec1782..84bc4303a133 100644
--- a/lib/Headers/Intrin.h
+++ b/lib/Headers/Intrin.h
@@ -160,9 +160,6 @@ void __writefsword(unsigned long, unsigned short);
void __writemsr(unsigned long, unsigned __int64);
static __inline__
void *_AddressOfReturnAddress(void);
-unsigned int _andn_u32(unsigned int, unsigned int);
-unsigned int _bextr_u32(unsigned int, unsigned int, unsigned int);
-unsigned int _bextri_u32(unsigned int, unsigned int);
static __inline__
unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
static __inline__
@@ -175,20 +172,9 @@ static __inline__
unsigned char _bittestandreset(long *, long);
static __inline__
unsigned char _bittestandset(long *, long);
-unsigned int _blcfill_u32(unsigned int);
-unsigned int _blci_u32(unsigned int);
-unsigned int _blcic_u32(unsigned int);
-unsigned int _blcmsk_u32(unsigned int);
-unsigned int _blcs_u32(unsigned int);
-unsigned int _blsfill_u32(unsigned int);
-unsigned int _blsi_u32(unsigned int);
-unsigned int _blsic_u32(unsigned int);
-unsigned int _blsmsk_u32(unsigned int);
-unsigned int _blsr_u32(unsigned int);
unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
unsigned long __cdecl _byteswap_ulong(unsigned long);
unsigned short __cdecl _byteswap_ushort(unsigned short);
-unsigned _bzhi_u32(unsigned int, unsigned int);
void __cdecl _disable(void);
void __cdecl _enable(void);
void __cdecl _fxrstor(void const *);
@@ -266,7 +252,6 @@ unsigned long __cdecl _lrotl(unsigned long, int);
static __inline__
unsigned long __cdecl _lrotr(unsigned long, int);
static __inline__
-unsigned int _lzcnt_u32(unsigned int);
static __inline__
void _ReadBarrier(void);
static __inline__
@@ -274,8 +259,6 @@ void _ReadWriteBarrier(void);
static __inline__
void *_ReturnAddress(void);
unsigned int _rorx_u32(unsigned int, const unsigned int);
-int __cdecl _rdrand16_step(unsigned short *);
-int __cdecl _rdrand32_step(unsigned int *);
static __inline__
unsigned int __cdecl _rotl(unsigned int _Value, int _Shift);
static __inline__
@@ -301,12 +284,8 @@ unsigned int _shrx_u32(unsigned int, unsigned int);
void _Store_HLERelease(long volatile *, long);
void _Store64_HLERelease(__int64 volatile *, __int64);
void _StorePointer_HLERelease(void *volatile *, void *);
-unsigned int _t1mskc_u32(unsigned int);
-unsigned int _tzcnt_u32(unsigned int);
-unsigned int _tzmsk_u32(unsigned int);
static __inline__
void _WriteBarrier(void);
-void _xabort(const unsigned int imm);
unsigned __int32 xbegin(void);
void _xend(void);
static __inline__
@@ -315,7 +294,6 @@ void __cdecl _xrstor(void const *, unsigned __int64);
void __cdecl _xsave(void *, unsigned __int64);
void __cdecl _xsaveopt(void *, unsigned __int64);
void __cdecl _xsetbv(unsigned int, unsigned __int64);
-unsigned char _xtest(void);
/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
#ifdef __x86_64__
@@ -352,7 +330,6 @@ unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
unsigned char _Shift);
static __inline__
void __stosq(unsigned __int64 *, unsigned __int64, size_t);
-unsigned __int64 __umulh(unsigned __int64, unsigned __int64);
unsigned char __vmx_on(unsigned __int64 *);
unsigned char __vmx_vmclear(unsigned __int64 *);
unsigned char __vmx_vmlaunch(void);
@@ -364,9 +341,6 @@ void __writegsbyte(unsigned long, unsigned char);
void __writegsdword(unsigned long, unsigned long);
void __writegsqword(unsigned long, unsigned __int64);
void __writegsword(unsigned long, unsigned short);
-unsigned __int64 _andn_u64(unsigned __int64, unsigned __int64);
-unsigned __int64 _bextr_u64(unsigned __int64, unsigned int, unsigned int);
-unsigned __int64 _bextri_u64(unsigned __int64, unsigned int);
static __inline__
unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
static __inline__
@@ -379,18 +353,7 @@ static __inline__
unsigned char _bittestandreset64(__int64 *, __int64);
static __inline__
unsigned char _bittestandset64(__int64 *, __int64);
-unsigned __int64 _blcfill_u64(unsigned __int64);
-unsigned __int64 _blci_u64(unsigned __int64);
-unsigned __int64 _blcic_u64(unsigned __int64);
-unsigned __int64 _blcmsk_u64(unsigned __int64);
-unsigned __int64 _blcs_u64(unsigned __int64);
-unsigned __int64 _blsfill_u64(unsigned __int64);
-unsigned __int64 _blsi_u64(unsigned __int64);
-unsigned __int64 _blsic_u64(unsigned __int64);
-unsigned __int64 _blsmsk_u64(unsigned __int64);
-unsigned __int64 _blsr_u64(unsigned __int64);
unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
-unsigned __int64 _bzhi_u64(unsigned __int64, unsigned int);
void __cdecl _fxrstor64(void const *);
void __cdecl _fxsave64(void *);
long _InterlockedAnd_np(long volatile *_Value, long _Mask);
@@ -444,29 +407,33 @@ __int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
char _InterlockedXor8_np(char volatile *_Value, char _Mask);
static __inline__
-unsigned __int64 _lzcnt_u64(unsigned __int64);
__int64 _mul128(__int64 _Multiplier, __int64 _Multiplicand,
__int64 *_HighProduct);
-unsigned int __cdecl _readfsbase_u32(void);
-unsigned __int64 __cdecl _readfsbase_u64(void);
-unsigned int __cdecl _readgsbase_u32(void);
-unsigned __int64 __cdecl _readgsbase_u64(void);
unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
__int64 _sarx_i64(__int64, unsigned int);
#if __STDC_HOSTED__
int __cdecl _setjmpex(jmp_buf);
#endif
unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
-unsigned __int64 shrx_u64(unsigned __int64, unsigned int);
-unsigned __int64 _tzcnt_u64(unsigned __int64);
-unsigned __int64 _tzmsk_u64(unsigned __int64);
-unsigned __int64 _umul128(unsigned __int64 _Multiplier,
- unsigned __int64 _Multiplicand,
- unsigned __int64 *_HighProduct);
-void __cdecl _writefsbase_u32(unsigned int);
-void _cdecl _writefsbase_u64(unsigned __int64);
-void __cdecl _writegsbase_u32(unsigned int);
-void __cdecl _writegsbase_u64(unsigned __int64);
+unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
+/*
+ * Multiply two 64-bit integers and obtain the full 128-bit product.
+ * The low-half is returned directly and the high half is in an out parameter.
+ */
+static __inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
+_umul128(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand,
+ unsigned __int64 *_HighProduct) {
+ unsigned __int128 _FullProduct =
+ (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
+ *_HighProduct = _FullProduct >> 64;
+ return _FullProduct;
+}
+static __inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
+__umulh(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand) {
+ unsigned __int128 _FullProduct =
+ (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
+ return _FullProduct >> 64;
+}
void __cdecl _xrstor64(void const *, unsigned __int64);
void __cdecl _xsave64(void *, unsigned __int64);
void __cdecl _xsaveopt64(void *, unsigned __int64);
@@ -545,12 +512,6 @@ _BitScanReverse(unsigned long *_Index, unsigned long _Mask) {
*_Index = 31 - __builtin_clzl(_Mask);
return 1;
}
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
-_lzcnt_u32(unsigned int a) {
- if (!a)
- return 32;
- return __builtin_clzl(a);
-}
static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
__popcnt16(unsigned short value) {
return __builtin_popcount((int)value);
@@ -608,13 +569,6 @@ _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask) {
*_Index = 63 - __builtin_clzll(_Mask);
return 1;
}
-static
-__inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
-_lzcnt_u64(unsigned __int64 a) {
- if (!a)
- return 64;
- return __builtin_clzll(a);
-}
static __inline__
unsigned __int64 __attribute__((__always_inline__, __nodebug__))
__popcnt64(unsigned __int64 value) {
@@ -861,10 +815,6 @@ static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
__readfsbyte(unsigned long __offset) {
return *__ptr_to_addr_space(257, unsigned char, __offset);
}
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__readfsdword(unsigned long __offset) {
- return *__ptr_to_addr_space(257, unsigned long, __offset);
-}
static __inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
__readfsqword(unsigned long __offset) {
return *__ptr_to_addr_space(257, unsigned __int64, __offset);
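
The new _umul128/__umulh definitions above compute the full product through unsigned __int128 and split it into a returned low half and a high half delivered via the out parameter (or the return value, for __umulh). A small usage sketch, assuming an x86_64 MSVC-compatible build where this Intrin.h is the header picked up:

#include <Intrin.h>
#include <stdio.h>

int main(void) {
  unsigned __int64 Hi = 0;
  unsigned __int64 Lo = _umul128(0xFFFFFFFFFFFFFFFFULL, 2ULL, &Hi);
  // (2^64 - 1) * 2 = 2^65 - 2: low half 0xFFFFFFFFFFFFFFFE, high half 1.
  printf("lo=%llx hi=%llx umulh=%llx\n",
         (unsigned long long)Lo, (unsigned long long)Hi,
         (unsigned long long)__umulh(0xFFFFFFFFFFFFFFFFULL, 2ULL));
  return 0;
}
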
diff --git a/lib/Headers/__stddef_max_align_t.h b/lib/Headers/__stddef_max_align_t.h
new file mode 100644
index 000000000000..a06f412c53fb
--- /dev/null
+++ b/lib/Headers/__stddef_max_align_t.h
@@ -0,0 +1,40 @@
+/*===---- __stddef_max_align_t.h - Definition of max_align_t for modules ---===
+ *
+ * Copyright (c) 2014 Chandler Carruth
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_MAX_ALIGN_T_DEFINED
+#define __CLANG_MAX_ALIGN_T_DEFINED
+
+#ifndef _MSC_VER
+typedef struct {
+ long long __clang_max_align_nonce1
+ __attribute__((__aligned__(__alignof__(long long))));
+ long double __clang_max_align_nonce2
+ __attribute__((__aligned__(__alignof__(long double))));
+} max_align_t;
+#else
+typedef double max_align_t;
+#endif
+
+#endif
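
The struct above deliberately carries one member aligned like long long and one aligned like long double, so max_align_t ends up at least as strictly aligned as either. A quick illustrative check, assuming a C++11 build where stddef.h pulls this header in:

#include <stddef.h>

static_assert(alignof(max_align_t) >= alignof(long long),
              "max_align_t is at least as aligned as long long");
static_assert(alignof(max_align_t) >= alignof(long double),
              "max_align_t is at least as aligned as long double");

int main() { return 0; }
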
diff --git a/lib/Headers/adxintrin.h b/lib/Headers/adxintrin.h
new file mode 100644
index 000000000000..9db8bcbd9030
--- /dev/null
+++ b/lib/Headers/adxintrin.h
@@ -0,0 +1,83 @@
+/*===---- adxintrin.h - ADX intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <adxintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __ADXINTRIN_H
+#define __ADXINTRIN_H
+
+/* Intrinsics that are available only if __ADX__ defined */
+#ifdef __ADX__
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__))
+_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__))
+_addcarryx_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
+}
+#endif
+#endif
+
+/* Intrinsics that are also available if __ADX__ undefined */
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__))
+_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_addcarry_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__))
+_addcarry_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_addcarry_u64(__cf, __x, __y, __p);
+}
+#endif
+
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__))
+_subborrow_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__))
+_subborrow_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);
+}
+#endif
+
+#endif /* __ADXINTRIN_H */
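
The _addcarry_u32/_addcarry_u64 pair above threads the carry flag through the unsigned char argument and return value, which is what makes multi-word addition straightforward. An illustrative sketch, assuming an x86 target and that <immintrin.h> (the mandated entry point per the #error guard) exposes these non-ADX intrinsics:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  // Two 128-bit values as four 32-bit limbs, least significant first.
  unsigned int A[4] = {0xFFFFFFFFu, 0xFFFFFFFFu, 0u, 0u}; // 2^64 - 1
  unsigned int B[4] = {1u, 0u, 0u, 0u};                   // 1
  unsigned int Sum[4];

  unsigned char Carry = 0;
  for (int i = 0; i < 4; ++i)
    Carry = _addcarry_u32(Carry, A[i], B[i], &Sum[i]); // carry ripples upward

  // (2^64 - 1) + 1 = 2^64, so Sum is {0, 0, 1, 0} with no final carry.
  printf("%08x %08x %08x %08x carry=%u\n",
         Sum[3], Sum[2], Sum[1], Sum[0], (unsigned)Carry);
  return 0;
}
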
diff --git a/lib/Headers/altivec.h b/lib/Headers/altivec.h
index f9fc64af3e74..0ac0841ae482 100644
--- a/lib/Headers/altivec.h
+++ b/lib/Headers/altivec.h
@@ -1623,6 +1623,21 @@ vec_vctuxs(vector float __a, int __b)
return __builtin_altivec_vctuxs(__a, __b);
}
+/* vec_div */
+#ifdef __VSX__
+static vector float __ATTRS_o_ai
+vec_div(vector float __a, vector float __b)
+{
+ return __builtin_vsx_xvdivsp(__a, __b);
+}
+
+static vector double __ATTRS_o_ai
+vec_div(vector double __a, vector double __b)
+{
+ return __builtin_vsx_xvdivdp(__a, __b);
+}
+#endif
+
/* vec_dss */
static void __attribute__((__always_inline__))
@@ -2253,91 +2268,273 @@ vec_vlogefp(vector float __a)
/* vec_lvsl */
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const signed char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const signed char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const unsigned char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const unsigned short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const unsigned int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const float *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const float *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
/* vec_lvsr */
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const signed char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const signed char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const unsigned char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const unsigned short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const unsigned int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const float *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const float *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
/* vec_madd */
@@ -2485,8 +2682,20 @@ vec_max(vector unsigned int __a, vector bool int __b)
static vector float __ATTRS_o_ai
vec_max(vector float __a, vector float __b)
{
+#ifdef __VSX__
+ return __builtin_vsx_xvmaxsp(__a, __b);
+#else
return __builtin_altivec_vmaxfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_max(vector double __a, vector double __b)
+{
+ return __builtin_vsx_xvmaxdp(__a, __b);
}
+#endif
/* vec_vmaxsb */
@@ -2613,7 +2822,11 @@ vec_vmaxuw(vector unsigned int __a, vector bool int __b)
static vector float __attribute__((__always_inline__))
vec_vmaxfp(vector float __a, vector float __b)
{
+#ifdef __VSX__
+ return __builtin_vsx_xvmaxsp(__a, __b);
+#else
return __builtin_altivec_vmaxfp(__a, __b);
+#endif
}
/* vec_mergeh */
@@ -3117,9 +3330,21 @@ vec_min(vector unsigned int __a, vector bool int __b)
static vector float __ATTRS_o_ai
vec_min(vector float __a, vector float __b)
{
+#ifdef __VSX__
+ return __builtin_vsx_xvminsp(__a, __b);
+#else
return __builtin_altivec_vminfp(__a, __b);
+#endif
}
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_min(vector double __a, vector double __b)
+{
+ return __builtin_vsx_xvmindp(__a, __b);
+}
+#endif
+
/* vec_vminsb */
static vector signed char __ATTRS_o_ai
@@ -3245,7 +3470,11 @@ vec_vminuw(vector unsigned int __a, vector bool int __b)
static vector float __attribute__((__always_inline__))
vec_vminfp(vector float __a, vector float __b)
{
+#ifdef __VSX__
+ return __builtin_vsx_xvminsp(__a, __b);
+#else
return __builtin_altivec_vminfp(__a, __b);
+#endif
}
/* vec_mladd */
@@ -4506,7 +4735,7 @@ vec_vpkswus(vector unsigned int __a, vector unsigned int __b)
// in that the vec_xor can be recognized as a vec_nor (and for P8 and
// later, possibly a vec_nand).
-vector signed char __ATTRS_o_ai
+static vector signed char __ATTRS_o_ai
vec_perm(vector signed char __a, vector signed char __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4521,7 +4750,7 @@ vec_perm(vector signed char __a, vector signed char __b, vector unsigned char __
#endif
}
-vector unsigned char __ATTRS_o_ai
+static vector unsigned char __ATTRS_o_ai
vec_perm(vector unsigned char __a,
vector unsigned char __b,
vector unsigned char __c)
@@ -4538,7 +4767,7 @@ vec_perm(vector unsigned char __a,
#endif
}
-vector bool char __ATTRS_o_ai
+static vector bool char __ATTRS_o_ai
vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4553,7 +4782,7 @@ vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c)
#endif
}
-vector short __ATTRS_o_ai
+static vector short __ATTRS_o_ai
vec_perm(vector short __a, vector short __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4568,7 +4797,7 @@ vec_perm(vector short __a, vector short __b, vector unsigned char __c)
#endif
}
-vector unsigned short __ATTRS_o_ai
+static vector unsigned short __ATTRS_o_ai
vec_perm(vector unsigned short __a,
vector unsigned short __b,
vector unsigned char __c)
@@ -4585,7 +4814,7 @@ vec_perm(vector unsigned short __a,
#endif
}
-vector bool short __ATTRS_o_ai
+static vector bool short __ATTRS_o_ai
vec_perm(vector bool short __a, vector bool short __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4600,7 +4829,7 @@ vec_perm(vector bool short __a, vector bool short __b, vector unsigned char __c)
#endif
}
-vector pixel __ATTRS_o_ai
+static vector pixel __ATTRS_o_ai
vec_perm(vector pixel __a, vector pixel __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4615,7 +4844,7 @@ vec_perm(vector pixel __a, vector pixel __b, vector unsigned char __c)
#endif
}
-vector int __ATTRS_o_ai
+static vector int __ATTRS_o_ai
vec_perm(vector int __a, vector int __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4628,7 +4857,7 @@ vec_perm(vector int __a, vector int __b, vector unsigned char __c)
#endif
}
-vector unsigned int __ATTRS_o_ai
+static vector unsigned int __ATTRS_o_ai
vec_perm(vector unsigned int __a, vector unsigned int __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4643,7 +4872,7 @@ vec_perm(vector unsigned int __a, vector unsigned int __b, vector unsigned char
#endif
}
-vector bool int __ATTRS_o_ai
+static vector bool int __ATTRS_o_ai
vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4658,7 +4887,7 @@ vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c)
#endif
}
-vector float __ATTRS_o_ai
+static vector float __ATTRS_o_ai
vec_perm(vector float __a, vector float __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4673,6 +4902,52 @@ vec_perm(vector float __a, vector float __b, vector unsigned char __c)
#endif
}
+#ifdef __VSX__
+static vector long long __ATTRS_o_ai
+vec_perm(vector long long __a, vector long long __b, vector unsigned char __c)
+{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector long long)__builtin_altivec_vperm_4si(__b, __a, __d);
+#else
+ return (vector long long)__builtin_altivec_vperm_4si(__a, __b, __c);
+#endif
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c)
+{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned long long)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
+ return (vector unsigned long long)
+ __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector double __ATTRS_o_ai
+vec_perm(vector double __a, vector double __b, vector unsigned char __c)
+{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector double)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
+ return (vector double)
+ __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
+}
+#endif
+
/* vec_vperm */
static vector signed char __ATTRS_o_ai
@@ -4745,6 +5020,27 @@ vec_vperm(vector float __a, vector float __b, vector unsigned char __c)
return vec_perm(__a, __b, __c);
}
+#ifdef __VSX__
+static vector long long __ATTRS_o_ai
+vec_vperm(vector long long __a, vector long long __b, vector unsigned char __c)
+{
+ return vec_perm(__a, __b, __c);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vperm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c)
+{
+ return vec_perm(__a, __b, __c);
+}
+
+static vector double __ATTRS_o_ai
+vec_vperm(vector double __a, vector double __b, vector unsigned char __c)
+{
+ return vec_perm(__a, __b, __c);
+}
+#endif
+
/* vec_re */
static vector float __attribute__((__always_inline__))
@@ -8368,11 +8664,11 @@ vec_sum2s(vector int __a, vector int __b)
#ifdef __LITTLE_ENDIAN__
vector int __c = (vector signed int)
vec_perm(__b, __b, (vector unsigned char)
- (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
__c = __builtin_altivec_vsum2sws(__a, __c);
return (vector signed int)
vec_perm(__c, __c, (vector unsigned char)
- (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
#else
return __builtin_altivec_vsum2sws(__a, __b);
#endif
@@ -8386,11 +8682,11 @@ vec_vsum2sws(vector int __a, vector int __b)
#ifdef __LITTLE_ENDIAN__
vector int __c = (vector signed int)
vec_perm(__b, __b, (vector unsigned char)
- (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
__c = __builtin_altivec_vsum2sws(__a, __c);
return (vector signed int)
vec_perm(__c, __c, (vector unsigned char)
- (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
#else
return __builtin_altivec_vsum2sws(__a, __b);
#endif
@@ -8661,6 +8957,91 @@ vec_vupklsh(vector pixel __a)
#endif
}
+/* vec_vsx_ld */
+
+#ifdef __VSX__
+
+static vector signed int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed int *__b)
+{
+ return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned int *__b)
+{
+ return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector float *__b)
+{
+ return (vector float)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed long long *__b)
+{
+ return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned long long *__b)
+{
+ return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static vector double __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector double *__b)
+{
+ return (vector double)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+#endif
+
+/* vec_vsx_st */
+
+#ifdef __VSX__
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector signed int __a, int __b, vector signed int *__c)
+{
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector unsigned int __a, int __b, vector unsigned int *__c)
+{
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector float __a, int __b, vector float *__c)
+{
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector signed long long __a, int __b, vector signed long long *__c)
+{
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector unsigned long long __a, int __b,
+ vector unsigned long long *__c)
+{
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector double __a, int __b, vector double *__c)
+{
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+#endif
+
/* vec_xor */
#define __builtin_altivec_vxor vec_xor
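
The lvsl/lvsr overloads above are marked deprecated on little-endian targets, and the new vec_vsx_ld/vec_vsx_st overloads give a direct way to perform unaligned vector loads and stores under VSX. A minimal sketch of that usage, assuming a VSX-capable POWER target built with -maltivec -mvsx; the function name copy_vec is illustrative and not part of the patch:

#include <altivec.h>

/* Load and store one vector of floats through the vec_vsx_ld/vec_vsx_st
   overloads added above; on little-endian VSX targets this replaces the
   deprecated lvsl/vec_perm idiom for unaligned access. */
static void copy_vec(const vector float *src, vector float *dst)
{
  vector float v = vec_vsx_ld(0, src);
  vec_vsx_st(v, 0, dst);
}
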
diff --git a/lib/Headers/arm_acle.h b/lib/Headers/arm_acle.h
index a0fd6894ab96..814df2c3d782 100644
--- a/lib/Headers/arm_acle.h
+++ b/lib/Headers/arm_acle.h
@@ -66,6 +66,41 @@ static __inline__ void __attribute__((always_inline, nodebug)) __yield(void) {
}
#endif
+#if __ARM_32BIT_STATE
+#define __dbg(t) __builtin_arm_dbg(t)
+#endif
+
+/* 8.5 Swap */
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __swp(uint32_t x, volatile uint32_t *p) {
+ uint32_t v;
+ do v = __builtin_arm_ldrex(p); while (__builtin_arm_strex(x, p));
+ return v;
+}
+
+/* 8.6 Memory prefetch intrinsics */
+/* 8.6.1 Data prefetch */
+#define __pld(addr) __pldx(0, 0, 0, addr)
+
+#if __ARM_32BIT_STATE
+#define __pldx(access_kind, cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, access_kind, 1)
+#else
+#define __pldx(access_kind, cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
+#endif
+
+/* 8.6.2 Instruction prefetch */
+#define __pli(addr) __plix(0, 0, addr)
+
+#if __ARM_32BIT_STATE
+#define __plix(cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, 0, 0)
+#else
+#define __plix(cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
+#endif
+
/* 8.7 NOP */
static __inline__ void __attribute__((always_inline, nodebug)) __nop(void) {
__builtin_arm_nop();
@@ -73,6 +108,32 @@ static __inline__ void __attribute__((always_inline, nodebug)) __nop(void) {
/* 9 DATA-PROCESSING INTRINSICS */
/* 9.2 Miscellaneous data-processing intrinsics */
+/* ROR */
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __ror(uint32_t x, uint32_t y) {
+ y %= 32;
+ if (y == 0) return x;
+ return (x >> y) | (x << (32 - y));
+}
+
+static __inline__ uint64_t __attribute__((always_inline, nodebug))
+ __rorll(uint64_t x, uint32_t y) {
+ y %= 64;
+ if (y == 0) return x;
+ return (x >> y) | (x << (64 - y));
+}
+
+static __inline__ unsigned long __attribute__((always_inline, nodebug))
+ __rorl(unsigned long x, uint32_t y) {
+#if __SIZEOF_LONG__ == 4
+ return __ror(x, y);
+#else
+ return __rorll(x, y);
+#endif
+}
+
+
+/* CLZ */
static __inline__ uint32_t __attribute__((always_inline, nodebug))
__clz(uint32_t t) {
return __builtin_clz(t);
@@ -85,13 +146,10 @@ static __inline__ unsigned long __attribute__((always_inline, nodebug))
static __inline__ uint64_t __attribute__((always_inline, nodebug))
__clzll(uint64_t t) {
-#if __SIZEOF_LONG_LONG__ == 8
return __builtin_clzll(t);
-#else
- return __builtin_clzl(t);
-#endif
}
+/* REV */
static __inline__ uint32_t __attribute__((always_inline, nodebug))
__rev(uint32_t t) {
return __builtin_bswap32(t);
@@ -111,6 +169,53 @@ static __inline__ uint64_t __attribute__((always_inline, nodebug))
return __builtin_bswap64(t);
}
+/* REV16 */
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __rev16(uint32_t t) {
+ return __ror(__rev(t), 16);
+}
+
+static __inline__ unsigned long __attribute__((always_inline, nodebug))
+ __rev16l(unsigned long t) {
+ return __rorl(__revl(t), sizeof(long) * 4);
+}
+
+static __inline__ uint64_t __attribute__((always_inline, nodebug))
+ __rev16ll(uint64_t t) {
+ return __rorll(__revll(t), 32);
+}
+
+/* REVSH */
+static __inline__ int16_t __attribute__((always_inline, nodebug))
+ __revsh(int16_t t) {
+ return __builtin_bswap16(t);
+}
+
+/* RBIT */
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __rbit(uint32_t t) {
+ return __builtin_arm_rbit(t);
+}
+
+static __inline__ uint64_t __attribute__((always_inline, nodebug))
+ __rbitll(uint64_t t) {
+#if __ARM_32BIT_STATE
+ return (((uint64_t) __builtin_arm_rbit(t)) << 32) |
+ __builtin_arm_rbit(t >> 32);
+#else
+ return __builtin_arm_rbit64(t);
+#endif
+}
+
+static __inline__ unsigned long __attribute__((always_inline, nodebug))
+ __rbitl(unsigned long t) {
+#if __SIZEOF_LONG__ == 4
+ return __rbit(t);
+#else
+ return __rbitll(t);
+#endif
+}
+
/*
* 9.4 Saturating intrinsics
*
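
The rotate helpers added above reduce the shift count modulo the operand width and return early for a zero count, so the complementary shift never equals the full width (which would be undefined behavior). The same logic as a standalone, portable model, assuming only <stdint.h>; ror32 is an illustrative name, not part of the header:

#include <assert.h>
#include <stdint.h>

/* Mirrors __ror above: reduce y mod 32, bail out on 0 so the
   (32 - y) shift stays in range, then combine the two shifts. */
static uint32_t ror32(uint32_t x, uint32_t y)
{
  y %= 32;
  if (y == 0)
    return x;
  return (x >> y) | (x << (32 - y));
}

int main(void)
{
  assert(ror32(0x12345678u, 8) == 0x78123456u);
  assert(ror32(0x12345678u, 32) == 0x12345678u); /* full rotation is an identity */
  return 0;
}
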
diff --git a/lib/Headers/avx512bwintrin.h b/lib/Headers/avx512bwintrin.h
new file mode 100644
index 000000000000..bc4d4ac6afdd
--- /dev/null
+++ b/lib/Headers/avx512bwintrin.h
@@ -0,0 +1,60 @@
+/*===------------- avx512bwintrin.h - AVX512BW intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __AVX512BWINTRIN_H
+#define __AVX512BWINTRIN_H
+
+typedef unsigned int __mmask32;
+typedef unsigned long long __mmask64;
+typedef char __v64qi __attribute__ ((vector_size (64)));
+typedef short __v32hi __attribute__ ((__vector_size__ (64)));
+
+
+/* Integer compare */
+
+static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__))
+_mm512_cmpeq_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__))
+_mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__))
+_mm512_cmpeq_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__a, (__v32hi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__))
+_mm512_mask_cmpeq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__a, (__v32hi)__b,
+ __u);
+}
+
+#endif
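
The byte and word compares above return one result bit per lane in an integer mask (__mmask64 or __mmask32), and the _mask_ variants AND that result with a caller-supplied mask. A small usage sketch, assuming these declarations are reached through <immintrin.h> on a target built with -mavx512bw; the function name is illustrative:

#include <immintrin.h>

/* Count how many of the 64 byte lanes of two 512-bit vectors match.
   __mmask64 is a plain 64-bit integer, so a popcount of the mask
   gives the number of equal lanes. Assumes -mavx512bw. */
static int count_equal_bytes(__m512i a, __m512i b)
{
  __mmask64 m = _mm512_cmpeq_epi8_mask(a, b);
  return __builtin_popcountll(m);
}
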
diff --git a/lib/Headers/avx512erintrin.h b/lib/Headers/avx512erintrin.h
new file mode 100644
index 000000000000..1a5ea153adf3
--- /dev/null
+++ b/lib/Headers/avx512erintrin.h
@@ -0,0 +1,112 @@
+/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512erintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512ERINTRIN_H
+#define __AVX512ERINTRIN_H
+
+
+// rsqrt28
+static __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_rsqrt28_round_pd (__m512d __A, int __R)
+{
+ return (__m512d)__builtin_ia32_rsqrt28pd_mask ((__v8df)__A,
+ (__v8df)_mm512_setzero_pd(),
+ (__mmask8)-1,
+ __R);
+}
+static __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_rsqrt28_round_ps(__m512 __A, int __R)
+{
+ return (__m512)__builtin_ia32_rsqrt28ps_mask ((__v16sf)__A,
+ (__v16sf)_mm512_setzero_ps(),
+ (__mmask16)-1,
+ __R);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rsqrt28_round_ss(__m128 __A, __m128 __B, int __R)
+{
+ return (__m128) __builtin_ia32_rsqrt28ss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1,
+ __R);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_rsqrt28_round_sd (__m128d __A, __m128d __B, int __R)
+{
+ return (__m128d) __builtin_ia32_rsqrt28sd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1,
+ __R);
+}
+
+
+// rcp28
+static __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_rcp28_round_pd (__m512d __A, int __R)
+{
+ return (__m512d)__builtin_ia32_rcp28pd_mask ((__v8df)__A,
+ (__v8df)_mm512_setzero_pd(),
+ (__mmask8)-1,
+ __R);
+}
+
+static __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_rcp28_round_ps (__m512 __A, int __R)
+{
+ return (__m512)__builtin_ia32_rcp28ps_mask ((__v16sf)__A,
+ (__v16sf)_mm512_setzero_ps (),
+ (__mmask16)-1,
+ __R);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rcp28_round_ss (__m128 __A, __m128 __B, int __R)
+{
+ return (__m128) __builtin_ia32_rcp28ss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1,
+ __R);
+}
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_rcp28_round_sd (__m128d __A, __m128d __B, int __R)
+{
+ return (__m128d) __builtin_ia32_rcp28sd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1,
+ __R);
+}
+
+#endif // __AVX512ERINTRIN_H
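
Each AVX-512ER wrapper takes an explicit rounding-mode argument; passing _MM_FROUND_CUR_DIRECTION (defined in avx512fintrin.h below) keeps the current MXCSR rounding mode. A brief usage sketch, assuming -mavx512er and inclusion via <immintrin.h>; approx_rsqrt is an illustrative name:

#include <immintrin.h>

/* Lanewise 28-bit-accurate reciprocal square root, evaluated in the
   current rounding mode. Assumes AVX-512ER (-mavx512er). */
static __m512d approx_rsqrt(__m512d x)
{
  return _mm512_rsqrt28_round_pd(x, _MM_FROUND_CUR_DIRECTION);
}
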
diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
new file mode 100644
index 000000000000..9c80710110b0
--- /dev/null
+++ b/lib/Headers/avx512fintrin.h
@@ -0,0 +1,1036 @@
+/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512FINTRIN_H
+#define __AVX512FINTRIN_H
+
+typedef double __v8df __attribute__((__vector_size__(64)));
+typedef float __v16sf __attribute__((__vector_size__(64)));
+typedef long long __v8di __attribute__((__vector_size__(64)));
+typedef int __v16si __attribute__((__vector_size__(64)));
+
+typedef float __m512 __attribute__((__vector_size__(64)));
+typedef double __m512d __attribute__((__vector_size__(64)));
+typedef long long __m512i __attribute__((__vector_size__(64)));
+
+typedef unsigned char __mmask8;
+typedef unsigned short __mmask16;
+
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+/* Create vectors with repeated elements */
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_setzero_si512(void)
+{
+ return (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
+{
+ return (__m512i) __builtin_ia32_pbroadcastd512_gpr_mask (__A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
+{
+#ifdef __x86_64__
+ return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+#else
+ return (__m512i) __builtin_ia32_pbroadcastq512_mem_mask (__A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+#endif
+}
+
+static __inline __m512 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_setzero_ps(void)
+{
+ return (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+}
+static __inline __m512d __attribute__ ((__always_inline__, __nodebug__))
+_mm512_setzero_pd(void)
+{
+ return (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+}
+
+static __inline __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_set1_ps(float __w)
+{
+ return (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_set1_pd(double __w)
+{
+ return (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m512i __attribute__((__always_inline__, __nodebug__))
+_mm512_set1_epi32(int __s)
+{
+ return (__m512i)(__v16si){ __s, __s, __s, __s, __s, __s, __s, __s,
+ __s, __s, __s, __s, __s, __s, __s, __s };
+}
+
+static __inline __m512i __attribute__((__always_inline__, __nodebug__))
+_mm512_set1_epi64(long long __d)
+{
+ return (__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
+}
+
+static __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_broadcastss_ps(__m128 __X)
+{
+ float __f = __X[0];
+ return (__v16sf){ __f, __f, __f, __f,
+ __f, __f, __f, __f,
+ __f, __f, __f, __f,
+ __f, __f, __f, __f };
+}
+
+static __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_broadcastsd_pd(__m128d __X)
+{
+ double __d = __X[0];
+ return (__v8df){ __d, __d, __d, __d,
+ __d, __d, __d, __d };
+}
+
+/* Cast between vector types */
+
+static __inline __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_castpd256_pd512(__m256d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
+}
+
+static __inline __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_castps256_ps512(__m256 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+}
+
+static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm512_castpd512_pd128(__m512d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm512_castps512_ps128(__m512 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
+}
+
+/* Arithmetic */
+
+static __inline __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_add_pd(__m512d __a, __m512d __b)
+{
+ return __a + __b;
+}
+
+static __inline __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_add_ps(__m512 __a, __m512 __b)
+{
+ return __a + __b;
+}
+
+static __inline __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_mul_pd(__m512d __a, __m512d __b)
+{
+ return __a * __b;
+}
+
+static __inline __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_mul_ps(__m512 __a, __m512 __b)
+{
+ return __a * __b;
+}
+
+static __inline __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_sub_pd(__m512d __a, __m512d __b)
+{
+ return __a - __b;
+}
+
+static __inline __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_sub_ps(__m512 __a, __m512 __b)
+{
+ return __a - __b;
+}
+
+static __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_max_pd(__m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_max_ps(__m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512i
+__attribute__ ((__always_inline__, __nodebug__))
+_mm512_max_epi32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_max_epu32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_max_epi64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_max_epu64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_min_pd(__m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_min_ps(__m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512i
+__attribute__ ((__always_inline__, __nodebug__))
+_mm512_min_epi32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_min_epu32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_min_epi64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_min_epu64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_mul_epi32(__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_mul_epu32(__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_sqrt_pd(__m512d a)
+{
+ return (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)a,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_sqrt_ps(__m512 a)
+{
+ return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)a,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_rsqrt14_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_rsqrt14_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rsqrt14_ss(__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_rsqrt14_sd(__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_rcp14_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_rcp14_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rcp14_ss(__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_rcp14_sd(__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline __m512 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_floor_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+ _MM_FROUND_FLOOR,
+ (__v16sf) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512d __attribute__ ((__always_inline__, __nodebug__))
+_mm512_floor_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+ _MM_FROUND_FLOOR,
+ (__v8df) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_ceil_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+ _MM_FROUND_CEIL,
+ (__v16sf) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512d __attribute__ ((__always_inline__, __nodebug__))
+_mm512_ceil_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+ _MM_FROUND_CEIL,
+ (__v8df) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512i __attribute__ (( __always_inline__, __nodebug__))
+_mm512_abs_epi64(__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __attribute__ (( __always_inline__, __nodebug__))
+_mm512_abs_epi32(__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_roundscale_ps(__m512 __A, const int __imm)
+{
+ return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, __imm,
+ (__v16sf) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+static __inline __m512d __attribute__ ((__always_inline__, __nodebug__))
+_mm512_roundscale_pd(__m512d __A, const int __imm)
+{
+ return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, __imm,
+ (__v8df) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d)
+ __builtin_ia32_vfmaddpd512_mask(__A,
+ __B,
+ __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d)
+ __builtin_ia32_vfmsubpd512_mask(__A,
+ __B,
+ __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d)
+ __builtin_ia32_vfnmaddpd512_mask(__A,
+ __B,
+ __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512)
+ __builtin_ia32_vfmaddps512_mask(__A,
+ __B,
+ __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512)
+ __builtin_ia32_vfmsubps512_mask(__A,
+ __B,
+ __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512)
+ __builtin_ia32_vfnmaddps512_mask(__A,
+ __B,
+ __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+/* Vector permutations */
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I
+ /* idx */ ,
+ (__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) -1);
+}
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I
+ /* idx */ ,
+ (__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) -1);
+}
+
+static __inline __m512d __attribute__ ((__always_inline__, __nodebug__))
+_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I
+ /* idx */ ,
+ (__v8df) __A,
+ (__v8df) __B,
+ (__mmask8) -1);
+}
+static __inline __m512 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
+{
+ return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I
+ /* idx */ ,
+ (__v16sf) __A,
+ (__v16sf) __B,
+ (__mmask16) -1);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_valign_epi64(__m512i __A, __m512i __B, const int __I)
+{
+ return (__m512i) __builtin_ia32_alignq512_mask((__v8di)__A,
+ (__v8di)__B,
+ __I,
+ (__v8di)_mm512_setzero_si512(),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_valign_epi32(__m512i __A, __m512i __B, const int __I)
+{
+ return (__m512i)__builtin_ia32_alignd512_mask((__v16si)__A,
+ (__v16si)__B,
+ __I,
+ (__v16si)_mm512_setzero_si512(),
+ (__mmask16) -1);
+}
+
+/* Vector Blend */
+
+static __inline __m512d __attribute__ ((__always_inline__, __nodebug__))
+_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
+{
+ return (__m512d) __builtin_ia32_blendmpd_512_mask ((__v8df) __A,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline __m512 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
+{
+ return (__m512) __builtin_ia32_blendmps_512_mask ((__v16sf) __A,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_blendmq_512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_blendmd_512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+/* Compare */
+
+static __inline __mmask16 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_cmp_ps_mask(__m512 a, __m512 b, const int p)
+{
+ return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) a,
+ (__v16sf) b, p, (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __mmask8 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_cmp_pd_mask(__m512d __X, __m512d __Y, const int __P)
+{
+ return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X,
+ (__v8df) __Y, __P,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+/* Conversion */
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_cvttps_epu32(__m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512 __attribute__ (( __always_inline__, __nodebug__))
+_mm512_cvt_roundepi32_ps(__m512i __A, const int __R)
+{
+ return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ __R);
+}
+
+static __inline __m512 __attribute__ (( __always_inline__, __nodebug__))
+_mm512_cvt_roundepu32_ps(__m512i __A, const int __R)
+{
+ return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ __R);
+}
+
+static __inline __m512d __attribute__ (( __always_inline__, __nodebug__))
+_mm512_cvtepi32_pd(__m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline __m512d __attribute__ (( __always_inline__, __nodebug__))
+_mm512_cvtepu32_pd(__m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+static __inline __m256 __attribute__ (( __always_inline__, __nodebug__))
+_mm512_cvt_roundpd_ps(__m512d __A, const int __R)
+{
+ return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1,
+ __R);
+}
+
+static __inline __m256i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_cvtps_ph(__m512 __A, const int __I)
+{
+ return (__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf) __A,
+ __I,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ -1);
+}
+
+static __inline __m512 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_cvtph_ps(__m256i __A)
+{
+ return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512i __attribute__((__always_inline__, __nodebug__))
+_mm512_cvttps_epi32(__m512 a)
+{
+ return (__m512i)
+ __builtin_ia32_cvttps2dq512_mask((__v16sf) a,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm512_cvttpd_epi32(__m512d a)
+{
+ return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) a,
+ (__v8si)_mm256_setzero_si256(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m256i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_cvtt_roundpd_epi32(__m512d __A, const int __R)
+{
+ return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_cvtt_roundps_epi32(__m512 __A, const int __R)
+{
+ return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1,
+ __R);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_cvt_roundps_epi32(__m512 __A, const int __R)
+{
+ return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1,
+ __R);
+}
+static __inline __m256i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_cvt_roundpd_epi32(__m512d __A, const int __R)
+{
+ return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_cvt_roundps_epu32(__m512 __A, const int __R)
+{
+ return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1,
+ __R);
+}
+static __inline __m256i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_cvt_roundpd_epu32(__m512d __A, const int __R)
+{
+ return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+/* Unpack and Interleave */
+static __inline __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_unpackhi_pd(__m512d __a, __m512d __b)
+{
+ return __builtin_shufflevector(__a, __b, 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
+}
+
+static __inline __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_unpacklo_pd(__m512d __a, __m512d __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
+}
+
+static __inline __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_unpackhi_ps(__m512 __a, __m512 __b)
+{
+ return __builtin_shufflevector(__a, __b,
+ 2, 18, 3, 19,
+ 2+4, 18+4, 3+4, 19+4,
+ 2+8, 18+8, 3+8, 19+8,
+ 2+12, 18+12, 3+12, 19+12);
+}
+
+static __inline __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_unpacklo_ps(__m512 __a, __m512 __b)
+{
+ return __builtin_shufflevector(__a, __b,
+ 0, 16, 1, 17,
+ 0+4, 16+4, 1+4, 17+4,
+ 0+8, 16+8, 1+8, 17+8,
+ 0+12, 16+12, 1+12, 17+12);
+}
+
+/* Bit Test */
+
+static __inline __mmask16 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_test_epi32_mask(__m512i __A, __m512i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestmd512 ((__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) -1);
+}
+
+static __inline __mmask8 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_test_epi64_mask(__m512i __A, __m512i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmq512 ((__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) -1);
+}
+
+/* SIMD load ops */
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddqusi512_mask ((const __v16si *)__P,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline __m512i __attribute__ ((__always_inline__, __nodebug__))
+_mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddqudi512_mask ((const __v8di *)__P,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline __m512 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
+{
+ return (__m512) __builtin_ia32_loadups512_mask ((const __v16sf *)__P,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline __m512d __attribute__ ((__always_inline__, __nodebug__))
+_mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
+{
+ return (__m512d) __builtin_ia32_loadupd512_mask ((const __v8df *)__P,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline __m512d __attribute__((__always_inline__, __nodebug__))
+_mm512_loadu_pd(double const *__p)
+{
+ struct __loadu_pd {
+ __m512d __v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_pd*)__p)->__v;
+}
+
+static __inline __m512 __attribute__((__always_inline__, __nodebug__))
+_mm512_loadu_ps(float const *__p)
+{
+ struct __loadu_ps {
+ __m512 __v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_ps*)__p)->__v;
+}
+
+/* SIMD store ops */
+
+static __inline void __attribute__ ((__always_inline__, __nodebug__))
+_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
+{
+ __builtin_ia32_storedqudi512_mask ((__v8di *)__P, (__v8di) __A,
+ (__mmask8) __U);
+}
+
+static __inline void __attribute__ ((__always_inline__, __nodebug__))
+_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
+{
+ __builtin_ia32_storedqusi512_mask ((__v16si *)__P, (__v16si) __A,
+ (__mmask16) __U);
+}
+
+static __inline void __attribute__ ((__always_inline__, __nodebug__))
+_mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A)
+{
+ __builtin_ia32_storeupd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
+}
+
+static __inline void __attribute__ ((__always_inline__, __nodebug__))
+_mm512_storeu_pd(void *__P, __m512d __A)
+{
+ __builtin_ia32_storeupd512_mask((__v8df *)__P, (__v8df)__A, (__mmask8)-1);
+}
+
+static __inline void __attribute__ ((__always_inline__, __nodebug__))
+_mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A)
+{
+ __builtin_ia32_storeups512_mask ((__v16sf *)__P, (__v16sf) __A,
+ (__mmask16) __U);
+}
+
+static __inline void __attribute__ ((__always_inline__, __nodebug__))
+_mm512_storeu_ps(void *__P, __m512 __A)
+{
+ __builtin_ia32_storeups512_mask((__v16sf *)__P, (__v16sf)__A, (__mmask16)-1);
+}
+
+static __inline void __attribute__ ((__always_inline__, __nodebug__))
+_mm512_store_ps(void *__P, __m512 __A)
+{
+ *(__m512*)__P = __A;
+}
+
+static __inline void __attribute__ ((__always_inline__, __nodebug__))
+_mm512_store_pd(void *__P, __m512d __A)
+{
+ *(__m512d*)__P = __A;
+}
+
+/* Mask ops */
+
+static __inline __mmask16 __attribute__ ((__always_inline__, __nodebug__))
+_mm512_knot(__mmask16 __M)
+{
+ return __builtin_ia32_knothi(__M);
+}
+
+/* Integer compare */
+
+static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__))
+_mm512_cmpeq_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__a, (__v16si)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__))
+_mm512_mask_cmpeq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__a, (__v16si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm512_mask_cmpeq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__a, (__v8di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm512_cmpeq_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__a, (__v8di)__b,
+ (__mmask8)-1);
+}
+
+#endif // __AVX512FINTRIN_H
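
The arithmetic wrappers above are implemented with ordinary vector operators, and the unaligned load/store helpers take plain float/double pointers, so the 512-bit types can be used much like their 128- and 256-bit counterparts. A short sketch, assuming -mavx512f and inclusion via <immintrin.h>; saxpy16 and the requirement that n be a multiple of 16 are illustrative simplifications:

#include <immintrin.h>

/* y[i] += a * x[i], 16 floats per iteration, using the unaligned
   load/store helpers added above. Assumes -mavx512f and that n is a
   multiple of 16 (no scalar tail loop in this sketch). */
static void saxpy16(float a, const float *x, float *y, int n)
{
  __m512 va = _mm512_set1_ps(a);
  for (int i = 0; i < n; i += 16) {
    __m512 vx = _mm512_loadu_ps(x + i);
    __m512 vy = _mm512_loadu_ps(y + i);
    _mm512_storeu_ps(y + i, _mm512_add_ps(_mm512_mul_ps(va, vx), vy));
  }
}
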
diff --git a/lib/Headers/avx512vlbwintrin.h b/lib/Headers/avx512vlbwintrin.h
new file mode 100644
index 000000000000..11333f851756
--- /dev/null
+++ b/lib/Headers/avx512vlbwintrin.h
@@ -0,0 +1,83 @@
+/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ----------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlbwintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLBWINTRIN_H
+#define __AVX512VLBWINTRIN_H
+
+/* Integer compare */
+
+static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__))
+_mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b,
+ __u);
+}
+
+
+static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__a, (__v32qi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__))
+_mm256_mask_cmpeq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__a, (__v32qi)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqw128_mask((__v8hi)__a, (__v8hi)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm_mask_cmpeq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqw128_mask((__v8hi)__a, (__v8hi)__b,
+ __u);
+}
+
+
+static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqw256_mask((__v16hi)__a, (__v16hi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__))
+_mm256_mask_cmpeq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqw256_mask((__v16hi)__a, (__v16hi)__b,
+ __u);
+}
+
+#endif /* __AVX512VLBWINTRIN_H */
diff --git a/lib/Headers/avx512vlintrin.h b/lib/Headers/avx512vlintrin.h
new file mode 100644
index 000000000000..8a374b102676
--- /dev/null
+++ b/lib/Headers/avx512vlintrin.h
@@ -0,0 +1,83 @@
+/*===---- avx512vlintrin.h - AVX512VL intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLINTRIN_H
+#define __AVX512VLINTRIN_H
+
+/* Integer compare */
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm_mask_cmpeq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
+ __u);
+}
+
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm256_mask_cmpeq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm_mask_cmpeq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
+ __u);
+}
+
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm256_mask_cmpeq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
+ __u);
+}
+
+#endif /* __AVX512VLINTRIN_H */
diff --git a/lib/Headers/bmiintrin.h b/lib/Headers/bmiintrin.h
index 43c4a5e5de3c..0e5fd5551fb3 100644
--- a/lib/Headers/bmiintrin.h
+++ b/lib/Headers/bmiintrin.h
@@ -43,7 +43,7 @@
static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
__tzcnt_u16(unsigned short __X)
{
- return __builtin_ctzs(__X);
+ return __X ? __builtin_ctzs(__X) : 16;
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
@@ -87,7 +87,7 @@ __blsr_u32(unsigned int __X)
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__tzcnt_u32(unsigned int __X)
{
- return __builtin_ctz(__X);
+ return __X ? __builtin_ctz(__X) : 32;
}
#ifdef __x86_64__
@@ -140,7 +140,7 @@ __blsr_u64(unsigned long long __X)
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__tzcnt_u64(unsigned long long __X)
{
- return __builtin_ctzll(__X);
+ return __X ? __builtin_ctzll(__X) : 64;
}
#endif /* __x86_64__ */
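For reference, a minimal usage sketch of the zero-input behavior these wrappers now guarantee (not part of the patch; assumes an x86 target built with -mbmi):

    #include <stdio.h>
    #include <x86intrin.h>   /* pulls in bmiintrin.h when BMI is enabled */

    int main(void) {
      unsigned int x = 0x20u;            /* only bit 5 set */
      printf("%u\n", __tzcnt_u32(x));    /* 5: index of the lowest set bit */
      printf("%u\n", __tzcnt_u32(0u));   /* 32: defined result for zero, matching TZCNT */
      return 0;
    }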
diff --git a/lib/Headers/cpuid.h b/lib/Headers/cpuid.h
index f9254e9738e7..5da02e0e5152 100644
--- a/lib/Headers/cpuid.h
+++ b/lib/Headers/cpuid.h
@@ -25,6 +25,60 @@
#error this header is for x86 only
#endif
+/* Responses identification request with %eax 0 */
+/* Responses to an identification request with %eax 0 */
+/* AMD: "AuthenticAMD" */
+#define signature_AMD_ebx 0x68747541
+#define signature_AMD_edx 0x69746e65
+#define signature_AMD_ecx 0x444d4163
+/* CENTAUR: "CentaurHauls" */
+#define signature_CENTAUR_ebx 0x746e6543
+#define signature_CENTAUR_edx 0x48727561
+#define signature_CENTAUR_ecx 0x736c7561
+/* CYRIX: "CyrixInstead" */
+#define signature_CYRIX_ebx 0x69727943
+#define signature_CYRIX_edx 0x736e4978
+#define signature_CYRIX_ecx 0x64616574
+/* INTEL: "GenuineIntel" */
+#define signature_INTEL_ebx 0x756e6547
+#define signature_INTEL_edx 0x49656e69
+#define signature_INTEL_ecx 0x6c65746e
+/* TM1: "TransmetaCPU" */
+#define signature_TM1_ebx 0x6e617254
+#define signature_TM1_edx 0x74656d73
+#define signature_TM1_ecx 0x55504361
+/* TM2: "GenuineTMx86" */
+#define signature_TM2_ebx 0x756e6547
+#define signature_TM2_edx 0x54656e69
+#define signature_TM2_ecx 0x3638784d
+/* NSC: "Geode by NSC" */
+#define signature_NSC_ebx 0x646f6547
+#define signature_NSC_edx 0x43534e20
+#define signature_NSC_ecx 0x79622065
+/* NEXGEN: "NexGenDriven" */
+#define signature_NEXGEN_ebx 0x4778654e
+#define signature_NEXGEN_edx 0x72446e65
+#define signature_NEXGEN_ecx 0x6e657669
+/* RISE: "RiseRiseRise" */
+#define signature_RISE_ebx 0x65736952
+#define signature_RISE_edx 0x65736952
+#define signature_RISE_ecx 0x65736952
+/* SIS: "SiS SiS SiS " */
+#define signature_SIS_ebx 0x20536953
+#define signature_SIS_edx 0x20536953
+#define signature_SIS_ecx 0x20536953
+/* UMC: "UMC UMC UMC " */
+#define signature_UMC_ebx 0x20434d55
+#define signature_UMC_edx 0x20434d55
+#define signature_UMC_ecx 0x20434d55
+/* VIA: "VIA VIA VIA " */
+#define signature_VIA_ebx 0x20414956
+#define signature_VIA_edx 0x20414956
+#define signature_VIA_ecx 0x20414956
+/* VORTEX: "Vortex86 SoC" */
+#define signature_VORTEX_ebx 0x74726f56
+#define signature_VORTEX_edx 0x36387865
+#define signature_VORTEX_ecx 0x436f5320
+
/* Features in %ecx for level 1 */
#define bit_SSE3 0x00000001
#define bit_PCLMULQDQ 0x00000002
@@ -53,7 +107,7 @@
#define bit_XSAVE 0x04000000
#define bit_OSXSAVE 0x08000000
#define bit_AVX 0x10000000
-#define bit_RDRAND 0x40000000
+#define bit_RDRND 0x40000000
/* Features in %edx for level 1 */
#define bit_FPU 0x00000001
@@ -92,31 +146,29 @@
#define bit_SMEP 0x00000080
#define bit_ENH_MOVSB 0x00000200
-/* PIC on i386 uses %ebx, so preserve it. */
#if __i386__
#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
- __asm(" pushl %%ebx\n" \
+ __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level))
+
+#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
+ __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level), "2"(__count))
+#else
+/* x86-64 uses %rbx as the base register, so preserve it. */
+#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
+ __asm(" xchgq %%rbx,%q1\n" \
" cpuid\n" \
- " mov %%ebx,%1\n" \
- " popl %%ebx" \
+ " xchgq %%rbx,%q1" \
: "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
: "0"(__level))
#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
- __asm(" pushl %%ebx\n" \
+ __asm(" xchgq %%rbx,%q1\n" \
" cpuid\n" \
- " mov %%ebx,%1\n" \
- " popl %%ebx" \
+ " xchgq %%rbx,%q1" \
: "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
: "0"(__level), "2"(__count))
-#else
-#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
- __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
- : "0"(__level))
-
-#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
- __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
- : "0"(__level), "2"(__count))
#endif
static __inline int __get_cpuid (unsigned int __level, unsigned int *__eax,
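An illustrative sketch (not part of the patch) of how the new vendor-signature macros pair with __get_cpuid; it assumes an x86 host:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void) {
      unsigned int eax, ebx, ecx, edx;
      if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
        return 1;                        /* CPUID not available */
      /* Leaf 0 returns the vendor string in ebx:edx:ecx; the signature_*
       * constants encode the same bytes, so no string building is needed. */
      if (ebx == signature_INTEL_ebx && edx == signature_INTEL_edx &&
          ecx == signature_INTEL_ecx)
        puts("GenuineIntel");
      else if (ebx == signature_AMD_ebx && edx == signature_AMD_edx &&
               ecx == signature_AMD_ecx)
        puts("AuthenticAMD");
      return 0;
    }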
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index b3f8569524b9..28d004309cf4 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -155,148 +155,148 @@ _mm_xor_pd(__m128d __a, __m128d __b)
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__a, __b, 0);
+ return (__m128d)__builtin_ia32_cmpeqpd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__a, __b, 1);
+ return (__m128d)__builtin_ia32_cmpltpd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmple_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__a, __b, 2);
+ return (__m128d)__builtin_ia32_cmplepd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__b, __a, 1);
+ return (__m128d)__builtin_ia32_cmpltpd(__b, __a);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__b, __a, 2);
+ return (__m128d)__builtin_ia32_cmplepd(__b, __a);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__a, __b, 7);
+ return (__m128d)__builtin_ia32_cmpordpd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__a, __b, 3);
+ return (__m128d)__builtin_ia32_cmpunordpd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__a, __b, 4);
+ return (__m128d)__builtin_ia32_cmpneqpd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__a, __b, 5);
+ return (__m128d)__builtin_ia32_cmpnltpd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__a, __b, 6);
+ return (__m128d)__builtin_ia32_cmpnlepd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__b, __a, 5);
+ return (__m128d)__builtin_ia32_cmpnltpd(__b, __a);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_pd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmppd(__b, __a, 6);
+ return (__m128d)__builtin_ia32_cmpnlepd(__b, __a);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_sd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmpsd(__a, __b, 0);
+ return (__m128d)__builtin_ia32_cmpeqsd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_sd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmpsd(__a, __b, 1);
+ return (__m128d)__builtin_ia32_cmpltsd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmple_sd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmpsd(__a, __b, 2);
+ return (__m128d)__builtin_ia32_cmplesd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_sd(__m128d __a, __m128d __b)
{
- __m128d __c = __builtin_ia32_cmpsd(__b, __a, 1);
+ __m128d __c = __builtin_ia32_cmpltsd(__b, __a);
return (__m128d) { __c[0], __a[1] };
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_sd(__m128d __a, __m128d __b)
{
- __m128d __c = __builtin_ia32_cmpsd(__b, __a, 2);
+ __m128d __c = __builtin_ia32_cmplesd(__b, __a);
return (__m128d) { __c[0], __a[1] };
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_sd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmpsd(__a, __b, 7);
+ return (__m128d)__builtin_ia32_cmpordsd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_sd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmpsd(__a, __b, 3);
+ return (__m128d)__builtin_ia32_cmpunordsd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_sd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmpsd(__a, __b, 4);
+ return (__m128d)__builtin_ia32_cmpneqsd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_sd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmpsd(__a, __b, 5);
+ return (__m128d)__builtin_ia32_cmpnltsd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_sd(__m128d __a, __m128d __b)
{
- return (__m128d)__builtin_ia32_cmpsd(__a, __b, 6);
+ return (__m128d)__builtin_ia32_cmpnlesd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_sd(__m128d __a, __m128d __b)
{
- __m128d __c = __builtin_ia32_cmpsd(__b, __a, 5);
+ __m128d __c = __builtin_ia32_cmpnltsd(__b, __a);
return (__m128d) { __c[0], __a[1] };
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_sd(__m128d __a, __m128d __b)
{
- __m128d __c = __builtin_ia32_cmpsd(__b, __a, 6);
+ __m128d __c = __builtin_ia32_cmpnlesd(__b, __a);
return (__m128d) { __c[0], __a[1] };
}
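The switch from immediate-operand __builtin_ia32_cmppd/cmpsd calls to named compare builtins does not change what the _mm_* wrappers compute; a minimal sketch of the observable behavior (not part of the patch; assumes SSE2):

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128d a = _mm_set_pd(4.0, 1.0);    /* lanes: {1.0, 4.0} */
      __m128d b = _mm_set_pd(3.0, 2.0);    /* lanes: {2.0, 3.0} */
      __m128d lt = _mm_cmplt_pd(a, b);     /* all-ones in lanes where a < b */
      int mask = _mm_movemask_pd(lt);      /* collect one bit per lane */
      printf("%d\n", mask);                /* 1: only lane 0 satisfies 1.0 < 2.0 */
      return 0;
    }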
diff --git a/lib/Headers/float.h b/lib/Headers/float.h
index 02ef6bf611a3..238cf76b053c 100644
--- a/lib/Headers/float.h
+++ b/lib/Headers/float.h
@@ -28,7 +28,7 @@
* additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
*/
-#if (defined(__MINGW32__) || defined(_MSC_VER)) && \
+#if (defined(__MINGW32__) || defined(_MSC_VER)) && __STDC_HOSTED__ && \
__has_include_next(<float.h>)
# include_next <float.h>
diff --git a/lib/Headers/immintrin.h b/lib/Headers/immintrin.h
index df4bea8c950e..2400fea499bd 100644
--- a/lib/Headers/immintrin.h
+++ b/lib/Headers/immintrin.h
@@ -76,6 +76,26 @@
#include <fmaintrin.h>
#endif
+#ifdef __AVX512F__
+#include <avx512fintrin.h>
+#endif
+
+#ifdef __AVX512VL__
+#include <avx512vlintrin.h>
+#endif
+
+#ifdef __AVX512BW__
+#include <avx512bwintrin.h>
+#endif
+
+#if defined (__AVX512VL__) && defined (__AVX512BW__)
+#include <avx512vlbwintrin.h>
+#endif
+
+#ifdef __AVX512ER__
+#include <avx512erintrin.h>
+#endif
+
#ifdef __RDRND__
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_rdrand16_step(unsigned short *__p)
@@ -98,6 +118,58 @@ _rdrand64_step(unsigned long long *__p)
#endif
#endif /* __RDRND__ */
+#ifdef __FSGSBASE__
+#ifdef __x86_64__
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_readfsbase_u32(void)
+{
+ return __builtin_ia32_rdfsbase32();
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_readfsbase_u64(void)
+{
+ return __builtin_ia32_rdfsbase64();
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_readgsbase_u32(void)
+{
+ return __builtin_ia32_rdgsbase32();
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_readgsbase_u64(void)
+{
+ return __builtin_ia32_rdgsbase64();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_writefsbase_u32(unsigned int __V)
+{
+ return __builtin_ia32_wrfsbase32(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_writefsbase_u64(unsigned long long __V)
+{
+ return __builtin_ia32_wrfsbase64(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_writegsbase_u32(unsigned int __V)
+{
+ return __builtin_ia32_wrgsbase32(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_writegsbase_u64(unsigned long long __V)
+{
+ return __builtin_ia32_wrgsbase64(__V);
+}
+#endif
+#endif /* __FSGSBASE__ */
+
#ifdef __RTM__
#include <rtmintrin.h>
#endif
@@ -115,4 +187,8 @@ _xtest(void)
#include <shaintrin.h>
#endif
+/* Some intrinsics inside adxintrin.h are available only if __ADX__ is
+ * defined, whereas others are available even if __ADX__ is undefined. */
+#include <adxintrin.h>
+
#endif /* __IMMINTRIN_H */
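adxintrin.h itself is not shown in this patch, but the carry intrinsics it declares are why it is now included unconditionally: as the comment above says, part of it (e.g. _addcarry_u32, if memory serves) is usable without -madx, while _addcarryx_u32 still requires it. A hedged usage sketch under that assumption:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      unsigned int lo, hi;
      /* 0xFFFFFFFF + 1 overflows the low word; the returned carry flag is
       * fed into a second add to build a 64-bit result from 32-bit pieces. */
      unsigned char c = _addcarry_u32(0, 0xFFFFFFFFu, 1u, &lo);
      (void)_addcarry_u32(c, 0u, 0u, &hi);
      printf("%u %u\n", hi, lo);           /* prints "1 0" */
      return 0;
    }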
diff --git a/lib/Headers/lzcntintrin.h b/lib/Headers/lzcntintrin.h
index 62ab5ca2f358..35d6659d245b 100644
--- a/lib/Headers/lzcntintrin.h
+++ b/lib/Headers/lzcntintrin.h
@@ -35,20 +35,32 @@
static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
__lzcnt16(unsigned short __X)
{
- return __builtin_clzs(__X);
+ return __X ? __builtin_clzs(__X) : 16;
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__lzcnt32(unsigned int __X)
{
- return __builtin_clz(__X);
+ return __X ? __builtin_clz(__X) : 32;
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_lzcnt_u32(unsigned int __X)
+{
+ return __X ? __builtin_clz(__X) : 32;
}
#ifdef __x86_64__
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__lzcnt64(unsigned long long __X)
{
- return __builtin_clzll(__X);
+ return __X ? __builtin_clzll(__X) : 64;
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_lzcnt_u64(unsigned long long __X)
+{
+ return __X ? __builtin_clzll(__X) : 64;
}
#endif
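A small sketch of the LZCNT semantics the new _lzcnt_u32/_lzcnt_u64 wrappers expose, including the now well-defined zero input (not part of the patch; assumes -mlzcnt):

    #include <x86intrin.h>   /* pulls in lzcntintrin.h when LZCNT is enabled */
    #include <stdio.h>

    int main(void) {
      printf("%u\n", _lzcnt_u32(1u));          /* 31 leading zero bits */
      printf("%u\n", _lzcnt_u32(0x80000000u)); /* 0 */
      printf("%u\n", _lzcnt_u32(0u));          /* 32: defined for zero, matching LZCNT */
      return 0;
    }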
diff --git a/lib/Headers/module.modulemap b/lib/Headers/module.modulemap
index 9f7944dedbb0..062464ed2e53 100644
--- a/lib/Headers/module.modulemap
+++ b/lib/Headers/module.modulemap
@@ -1,4 +1,4 @@
-module _Builtin_intrinsics [system] {
+module _Builtin_intrinsics [system] [extern_c] {
explicit module altivec {
requires altivec
header "altivec.h"
@@ -7,6 +7,11 @@ module _Builtin_intrinsics [system] {
explicit module arm {
requires arm
+ explicit module acle {
+ header "arm_acle.h"
+ export *
+ }
+
explicit module neon {
requires neon
header "arm_neon.h"
@@ -96,6 +101,17 @@ module _Builtin_intrinsics [system] {
header "avx2intrin.h"
}
+ explicit module avx512f {
+ requires avx512f
+ export avx2
+ header "avx512fintrin.h"
+ }
+
+ explicit module avx512er {
+ requires avx512er
+ header "avx512erintrin.h"
+ }
+
explicit module bmi {
requires bmi
header "bmiintrin.h"
@@ -154,3 +170,7 @@ module _Builtin_intrinsics [system] {
}
}
}
+
+module _Builtin_stddef_max_align_t [system] [extern_c] {
+ header "__stddef_max_align_t.h"
+}
diff --git a/lib/Headers/shaintrin.h b/lib/Headers/shaintrin.h
index 66ed0554b64d..391a4bbc4f0e 100644
--- a/lib/Headers/shaintrin.h
+++ b/lib/Headers/shaintrin.h
@@ -38,37 +38,37 @@
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
{
- return __builtin_ia32_sha1nexte(__X, __Y);
+ return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)
{
- return __builtin_ia32_sha1msg1(__X, __Y);
+ return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)
{
- return __builtin_ia32_sha1msg2(__X, __Y);
+ return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)
{
- return __builtin_ia32_sha256rnds2(__X, __Y, __Z);
+ return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)
{
- return __builtin_ia32_sha256msg1(__X, __Y);
+ return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)
{
- return __builtin_ia32_sha256msg2(__X, __Y);
+ return (__m128i)__builtin_ia32_sha256msg2((__v4si)__X, (__v4si)__Y);
}
#endif /* __SHAINTRIN_H */
diff --git a/lib/Headers/stdatomic.h b/lib/Headers/stdatomic.h
new file mode 100644
index 000000000000..e3c34767a21a
--- /dev/null
+++ b/lib/Headers/stdatomic.h
@@ -0,0 +1,190 @@
+/*===---- stdatomic.h - Standard header for atomic types and operations -----===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_STDATOMIC_H
+#define __CLANG_STDATOMIC_H
+
+/* If we're hosted, fall back to the system's stdatomic.h. FreeBSD, for
+ * example, already has a Clang-compatible stdatomic.h header.
+ */
+#if __STDC_HOSTED__ && __has_include_next(<stdatomic.h>)
+# include_next <stdatomic.h>
+#else
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* 7.17.1 Introduction */
+
+#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
+#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
+#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#define ATOMIC_SHORT_T_LOCK_FREE __GCC_ATOMIC_SHORT_T_LOCK_FREE
+#define ATOMIC_INT_T_LOCK_FREE __GCC_ATOMIC_INT_T_LOCK_FREE
+#define ATOMIC_LONG_T_LOCK_FREE __GCC_ATOMIC_LONG_T_LOCK_FREE
+#define ATOMIC_LLONG_T_LOCK_FREE __GCC_ATOMIC_LLONG_T_LOCK_FREE
+#define ATOMIC_POINTER_T_LOCK_FREE __GCC_ATOMIC_POINTER_T_LOCK_FREE
+
+/* 7.17.2 Initialization */
+
+#define ATOMIC_VAR_INIT(value) (value)
+#define atomic_init __c11_atomic_init
+
+/* 7.17.3 Order and consistency */
+
+typedef enum memory_order {
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_consume = __ATOMIC_CONSUME,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
+} memory_order;
+
+#define kill_dependency(y) (y)
+
+/* 7.17.4 Fences */
+
+// These should be provided by the libc implementation.
+void atomic_thread_fence(memory_order);
+void atomic_signal_fence(memory_order);
+
+#define atomic_thread_fence(order) __c11_atomic_thread_fence(order)
+#define atomic_signal_fence(order) __c11_atomic_signal_fence(order)
+
+/* 7.17.5 Lock-free property */
+
+#define atomic_is_lock_free(obj) __c11_atomic_is_lock_free(sizeof(*(obj)))
+
+/* 7.17.6 Atomic integer types */
+
+#ifdef __cplusplus
+typedef _Atomic(bool) atomic_bool;
+#else
+typedef _Atomic(_Bool) atomic_bool;
+#endif
+typedef _Atomic(char) atomic_char;
+typedef _Atomic(signed char) atomic_schar;
+typedef _Atomic(unsigned char) atomic_uchar;
+typedef _Atomic(short) atomic_short;
+typedef _Atomic(unsigned short) atomic_ushort;
+typedef _Atomic(int) atomic_int;
+typedef _Atomic(unsigned int) atomic_uint;
+typedef _Atomic(long) atomic_long;
+typedef _Atomic(unsigned long) atomic_ulong;
+typedef _Atomic(long long) atomic_llong;
+typedef _Atomic(unsigned long long) atomic_ullong;
+typedef _Atomic(uint_least16_t) atomic_char16_t;
+typedef _Atomic(uint_least32_t) atomic_char32_t;
+typedef _Atomic(wchar_t) atomic_wchar_t;
+typedef _Atomic(int_least8_t) atomic_int_least8_t;
+typedef _Atomic(uint_least8_t) atomic_uint_least8_t;
+typedef _Atomic(int_least16_t) atomic_int_least16_t;
+typedef _Atomic(uint_least16_t) atomic_uint_least16_t;
+typedef _Atomic(int_least32_t) atomic_int_least32_t;
+typedef _Atomic(uint_least32_t) atomic_uint_least32_t;
+typedef _Atomic(int_least64_t) atomic_int_least64_t;
+typedef _Atomic(uint_least64_t) atomic_uint_least64_t;
+typedef _Atomic(int_fast8_t) atomic_int_fast8_t;
+typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t;
+typedef _Atomic(int_fast16_t) atomic_int_fast16_t;
+typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t;
+typedef _Atomic(int_fast32_t) atomic_int_fast32_t;
+typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t;
+typedef _Atomic(int_fast64_t) atomic_int_fast64_t;
+typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t;
+typedef _Atomic(intptr_t) atomic_intptr_t;
+typedef _Atomic(uintptr_t) atomic_uintptr_t;
+typedef _Atomic(size_t) atomic_size_t;
+typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;
+typedef _Atomic(intmax_t) atomic_intmax_t;
+typedef _Atomic(uintmax_t) atomic_uintmax_t;
+
+/* 7.17.7 Operations on atomic types */
+
+#define atomic_store(object, desired) __c11_atomic_store(object, desired, __ATOMIC_SEQ_CST)
+#define atomic_store_explicit __c11_atomic_store
+
+#define atomic_load(object) __c11_atomic_load(object, __ATOMIC_SEQ_CST)
+#define atomic_load_explicit __c11_atomic_load
+
+#define atomic_exchange(object, desired) __c11_atomic_exchange(object, desired, __ATOMIC_SEQ_CST)
+#define atomic_exchange_explicit __c11_atomic_exchange
+
+#define atomic_compare_exchange_strong(object, expected, desired) __c11_atomic_compare_exchange_strong(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+#define atomic_compare_exchange_strong_explicit __c11_atomic_compare_exchange_strong
+
+#define atomic_compare_exchange_weak(object, expected, desired) __c11_atomic_compare_exchange_weak(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+#define atomic_compare_exchange_weak_explicit __c11_atomic_compare_exchange_weak
+
+#define atomic_fetch_add(object, operand) __c11_atomic_fetch_add(object, operand, __ATOMIC_SEQ_CST)
+#define atomic_fetch_add_explicit __c11_atomic_fetch_add
+
+#define atomic_fetch_sub(object, operand) __c11_atomic_fetch_sub(object, operand, __ATOMIC_SEQ_CST)
+#define atomic_fetch_sub_explicit __c11_atomic_fetch_sub
+
+#define atomic_fetch_or(object, operand) __c11_atomic_fetch_or(object, operand, __ATOMIC_SEQ_CST)
+#define atomic_fetch_or_explicit __c11_atomic_fetch_or
+
+#define atomic_fetch_xor(object, operand) __c11_atomic_fetch_xor(object, operand, __ATOMIC_SEQ_CST)
+#define atomic_fetch_xor_explicit __c11_atomic_fetch_xor
+
+#define atomic_fetch_and(object, operand) __c11_atomic_fetch_and(object, operand, __ATOMIC_SEQ_CST)
+#define atomic_fetch_and_explicit __c11_atomic_fetch_and
+
+/* 7.17.8 Atomic flag type and operations */
+
+typedef struct atomic_flag { atomic_bool _Value; } atomic_flag;
+
+#define ATOMIC_FLAG_INIT { 0 }
+
+// These should be provided by the libc implementation.
+#ifdef __cplusplus
+bool atomic_flag_test_and_set(volatile atomic_flag *);
+bool atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);
+#else
+_Bool atomic_flag_test_and_set(volatile atomic_flag *);
+_Bool atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);
+#endif
+void atomic_flag_clear(volatile atomic_flag *);
+void atomic_flag_clear_explicit(volatile atomic_flag *, memory_order);
+
+#define atomic_flag_test_and_set(object) __c11_atomic_exchange(&(object)->_Value, 1, __ATOMIC_SEQ_CST)
+#define atomic_flag_test_and_set_explicit(object, order) __c11_atomic_exchange(&(object)->_Value, 1, order)
+
+#define atomic_flag_clear(object) __c11_atomic_store(&(object)->_Value, 0, __ATOMIC_SEQ_CST)
+#define atomic_flag_clear_explicit(object, order) __c11_atomic_store(&(object)->_Value, 0, order)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __STDC_HOSTED__ */
+#endif /* __CLANG_STDATOMIC_H */
+
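A minimal C11 usage sketch for the new freestanding stdatomic.h (not part of the patch); the POSIX thread calls are only there to exercise the atomics:

    /* cc -std=c11 example.c -lpthread */
    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    static atomic_int counter = ATOMIC_VAR_INIT(0);

    static void *worker(void *arg) {
      (void)arg;
      for (int i = 0; i < 100000; ++i)
        atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed);
      return NULL;
    }

    int main(void) {
      pthread_t t1, t2;
      pthread_create(&t1, NULL, worker, NULL);
      pthread_create(&t2, NULL, worker, NULL);
      pthread_join(t1, NULL);
      pthread_join(t2, NULL);
      printf("%d\n", atomic_load(&counter));   /* 200000 */
      return 0;
    }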
diff --git a/lib/Headers/stddef.h b/lib/Headers/stddef.h
index 2dfe0a29b2a2..735499671156 100644
--- a/lib/Headers/stddef.h
+++ b/lib/Headers/stddef.h
@@ -30,11 +30,15 @@
#if !defined(__need_ptrdiff_t) && !defined(__need_size_t) && \
!defined(__need_wchar_t) && !defined(__need_NULL) && \
!defined(__need_wint_t)
+/* Always define miscellaneous pieces when modules are available. */
+#if !__has_feature(modules)
#define __STDDEF_H
+#endif
#define __need_ptrdiff_t
#define __need_size_t
#define __need_wchar_t
#define __need_NULL
+#define __need_STDDEF_H_misc
/* __need_wint_t is intentionally not defined here. */
#endif
@@ -60,7 +64,7 @@ typedef __SIZE_TYPE__ size_t;
#undef __need_size_t
#endif /*defined(__need_size_t) */
-#if defined(__STDDEF_H)
+#if defined(__need_STDDEF_H_misc)
/* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is
* enabled. */
#if (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1 && \
@@ -71,7 +75,7 @@ typedef __SIZE_TYPE__ size_t;
#endif
typedef __SIZE_TYPE__ rsize_t;
#endif
-#endif /* defined(__STDDEF_H) */
+#endif /* defined(__need_STDDEF_H_misc) */
#if defined(__need_wchar_t)
#ifndef __cplusplus
@@ -109,26 +113,13 @@ using ::std::nullptr_t;
#undef __need_NULL
#endif /* defined(__need_NULL) */
-#if defined(__STDDEF_H)
-
+#if defined(__need_STDDEF_H_misc)
#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L
-#if !defined(__CLANG_MAX_ALIGN_T_DEFINED) || __has_feature(modules)
-#ifndef _MSC_VER
-typedef struct {
- long long __clang_max_align_nonce1
- __attribute__((__aligned__(__alignof__(long long))));
- long double __clang_max_align_nonce2
- __attribute__((__aligned__(__alignof__(long double))));
-} max_align_t;
-#else
-typedef double max_align_t;
+#include "__stddef_max_align_t.h"
#endif
-#define __CLANG_MAX_ALIGN_T_DEFINED
-#endif
-#endif
-
#define offsetof(t, d) __builtin_offsetof(t, d)
-#endif /* __STDDEF_H */
+#undef __need_STDDEF_H_misc
+#endif /* defined(__need_STDDEF_H_misc) */
/* Some C libraries expect to see a wint_t here. Others (notably MinGW) will use
__WINT_TYPE__ directly; accommodate both by requiring __need_wint_t */
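The __need_* dance above exists so that C library headers can pull in a single definition without the miscellaneous pieces (offsetof, max_align_t, rsize_t); a short sketch of that convention (not part of the patch; my_strlen is just a stand-in):

    /* A header that only needs size_t asks for just that: */
    #define __need_size_t
    #include <stddef.h>

    size_t my_strlen(const char *s) {
      size_t n = 0;
      while (s[n])
        ++n;
      return n;
    }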
diff --git a/lib/Headers/unwind.h b/lib/Headers/unwind.h
index 685c1dfd04e8..90aca16aca39 100644
--- a/lib/Headers/unwind.h
+++ b/lib/Headers/unwind.h
@@ -26,8 +26,8 @@
#ifndef __CLANG_UNWIND_H
#define __CLANG_UNWIND_H
-#if __has_include_next(<unwind.h>)
-/* Darwin (from 11.x on) and libunwind provide an unwind.h. If that's available,
+#if defined(__APPLE__) && __has_include_next(<unwind.h>)
+/* Darwin (from 11.x on) provides an unwind.h. If that's available,
* use it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
* so define that around the include.*/
# ifndef _GNU_SOURCE
@@ -199,6 +199,8 @@ _Unwind_Word _Unwind_GetIPInfo(struct _Unwind_Context *, int *);
_Unwind_Word _Unwind_GetCFA(struct _Unwind_Context *);
+_Unwind_Word _Unwind_GetBSP(struct _Unwind_Context *);
+
void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context *);
_Unwind_Ptr _Unwind_GetRegionStart(struct _Unwind_Context *);
diff --git a/lib/Headers/vadefs.h b/lib/Headers/vadefs.h
new file mode 100644
index 000000000000..7fe9a74e3f69
--- /dev/null
+++ b/lib/Headers/vadefs.h
@@ -0,0 +1,65 @@
+/* ===-------- vadefs.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we are aiming for MSVC compatibility. */
+#ifndef _MSC_VER
+#include_next <vadefs.h>
+#else
+
+#ifndef __clang_vadefs_h
+#define __clang_vadefs_h
+
+#include_next <vadefs.h>
+
+/* Override macros from vadefs.h with definitions that work with Clang. */
+#ifdef _crt_va_start
+#undef _crt_va_start
+#define _crt_va_start(ap, param) __builtin_va_start(ap, param)
+#endif
+#ifdef _crt_va_end
+#undef _crt_va_end
+#define _crt_va_end(ap) __builtin_va_end(ap)
+#endif
+#ifdef _crt_va_arg
+#undef _crt_va_arg
+#define _crt_va_arg(ap, type) __builtin_va_arg(ap, type)
+#endif
+
+/* VS 2015 switched to double underscore names, which is an improvement, but now
+ * we have to intercept those names too.
+ */
+#ifdef __crt_va_start
+#undef __crt_va_start
+#define __crt_va_start(ap, param) __builtin_va_start(ap, param)
+#endif
+#ifdef __crt_va_end
+#undef __crt_va_end
+#define __crt_va_end(ap) __builtin_va_end(ap)
+#endif
+#ifdef __crt_va_arg
+#undef __crt_va_arg
+#define __crt_va_arg(ap, type) __builtin_va_arg(ap, type)
+#endif
+
+#endif
+#endif
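On MSVC targets the CRT's stdarg.h typically defines va_start and friends in terms of the _crt_va_* macros overridden above, so ordinary variadic code ends up on the Clang builtins; an illustrative sketch (not part of the patch, and the CRT mapping is an assumption here):

    #include <stdarg.h>
    #include <stdio.h>

    static int sum(int n, ...) {
      va_list ap;
      int total = 0;
      va_start(ap, n);              /* lowers to __builtin_va_start */
      for (int i = 0; i < n; ++i)
        total += va_arg(ap, int);
      va_end(ap);
      return total;
    }

    int main(void) {
      printf("%d\n", sum(3, 1, 2, 3));   /* 6 */
      return 0;
    }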
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
index c9befcb456f6..d1afe81601c3 100644
--- a/lib/Headers/xmmintrin.h
+++ b/lib/Headers/xmmintrin.h
@@ -182,153 +182,153 @@ _mm_xor_ps(__m128 __a, __m128 __b)
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_ss(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpss(__a, __b, 0);
+ return (__m128)__builtin_ia32_cmpeqss(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__a, __b, 0);
+ return (__m128)__builtin_ia32_cmpeqps(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_ss(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpss(__a, __b, 1);
+ return (__m128)__builtin_ia32_cmpltss(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__a, __b, 1);
+ return (__m128)__builtin_ia32_cmpltps(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmple_ss(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpss(__a, __b, 2);
+ return (__m128)__builtin_ia32_cmpless(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmple_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__a, __b, 2);
+ return (__m128)__builtin_ia32_cmpleps(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_ss(__m128 __a, __m128 __b)
{
return (__m128)__builtin_shufflevector(__a,
- __builtin_ia32_cmpss(__b, __a, 1),
+ __builtin_ia32_cmpltss(__b, __a),
4, 1, 2, 3);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__b, __a, 1);
+ return (__m128)__builtin_ia32_cmpltps(__b, __a);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_ss(__m128 __a, __m128 __b)
{
return (__m128)__builtin_shufflevector(__a,
- __builtin_ia32_cmpss(__b, __a, 2),
+ __builtin_ia32_cmpless(__b, __a),
4, 1, 2, 3);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__b, __a, 2);
+ return (__m128)__builtin_ia32_cmpleps(__b, __a);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_ss(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpss(__a, __b, 4);
+ return (__m128)__builtin_ia32_cmpneqss(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__a, __b, 4);
+ return (__m128)__builtin_ia32_cmpneqps(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_ss(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpss(__a, __b, 5);
+ return (__m128)__builtin_ia32_cmpnltss(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__a, __b, 5);
+ return (__m128)__builtin_ia32_cmpnltps(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_ss(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpss(__a, __b, 6);
+ return (__m128)__builtin_ia32_cmpnless(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__a, __b, 6);
+ return (__m128)__builtin_ia32_cmpnleps(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_ss(__m128 __a, __m128 __b)
{
return (__m128)__builtin_shufflevector(__a,
- __builtin_ia32_cmpss(__b, __a, 5),
+ __builtin_ia32_cmpnltss(__b, __a),
4, 1, 2, 3);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__b, __a, 5);
+ return (__m128)__builtin_ia32_cmpnltps(__b, __a);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_ss(__m128 __a, __m128 __b)
{
return (__m128)__builtin_shufflevector(__a,
- __builtin_ia32_cmpss(__b, __a, 6),
+ __builtin_ia32_cmpnless(__b, __a),
4, 1, 2, 3);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__b, __a, 6);
+ return (__m128)__builtin_ia32_cmpnleps(__b, __a);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_ss(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpss(__a, __b, 7);
+ return (__m128)__builtin_ia32_cmpordss(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__a, __b, 7);
+ return (__m128)__builtin_ia32_cmpordps(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_ss(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpss(__a, __b, 3);
+ return (__m128)__builtin_ia32_cmpunordss(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_ps(__m128 __a, __m128 __b)
{
- return (__m128)__builtin_ia32_cmpps(__a, __b, 3);
+ return (__m128)__builtin_ia32_cmpunordps(__a, __b);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__))
diff --git a/lib/Index/CMakeLists.txt b/lib/Index/CMakeLists.txt
index 1ebb636e8eca..3869c32c879a 100644
--- a/lib/Index/CMakeLists.txt
+++ b/lib/Index/CMakeLists.txt
@@ -13,7 +13,6 @@ add_clang_library(clangIndex
clangAST
clangBasic
clangFormat
- clangLex
clangRewrite
- clangTooling
+ clangToolingCore
)
diff --git a/lib/Index/CommentToXML.cpp b/lib/Index/CommentToXML.cpp
index a67c806550d8..ef6aeefa6526 100644
--- a/lib/Index/CommentToXML.cpp
+++ b/lib/Index/CommentToXML.cpp
@@ -15,7 +15,6 @@
#include "clang/AST/CommentVisitor.h"
#include "clang/Format/Format.h"
#include "clang/Index/USRGeneration.h"
-#include "clang/Lex/Lexer.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Support/raw_ostream.h"
@@ -609,14 +608,9 @@ void CommentASTToXMLConverter::formatTextOfDeclaration(
.getLocWithOffset(0);
unsigned Length = Declaration.size();
- std::vector<CharSourceRange> Ranges(
- 1, CharSourceRange::getCharRange(Start, Start.getLocWithOffset(Length)));
- ASTContext &Context = DI->CurrentDecl->getASTContext();
- const LangOptions &LangOpts = Context.getLangOpts();
- Lexer Lex(ID, FormatRewriterContext.Sources.getBuffer(ID),
- FormatRewriterContext.Sources, LangOpts);
tooling::Replacements Replace = reformat(
- format::getLLVMStyle(), Lex, FormatRewriterContext.Sources, Ranges);
+ format::getLLVMStyle(), FormatRewriterContext.Sources, ID,
+ CharSourceRange::getCharRange(Start, Start.getLocWithOffset(Length)));
applyAllReplacements(Replace, FormatRewriterContext.Rewrite);
Declaration = FormatRewriterContext.getRewrittenText(ID);
}
diff --git a/lib/Index/SimpleFormatContext.h b/lib/Index/SimpleFormatContext.h
index a460863f803e..080a4ad04ddf 100644
--- a/lib/Index/SimpleFormatContext.h
+++ b/lib/Index/SimpleFormatContext.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SIMPLE_FORM_CONTEXT_H
-#define LLVM_CLANG_SIMPLE_FORM_CONTEXT_H
+#ifndef LLVM_CLANG_LIB_INDEX_SIMPLEFORMATCONTEXT_H
+#define LLVM_CLANG_LIB_INDEX_SIMPLEFORMATCONTEXT_H
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
@@ -47,10 +47,11 @@ public:
~SimpleFormatContext() { }
FileID createInMemoryFile(StringRef Name, StringRef Content) {
- llvm::MemoryBuffer *Source = llvm::MemoryBuffer::getMemBuffer(Content);
+ std::unique_ptr<llvm::MemoryBuffer> Source =
+ llvm::MemoryBuffer::getMemBuffer(Content);
const FileEntry *Entry =
Files.getVirtualFile(Name, Source->getBufferSize(), 0);
- Sources.overrideFileContents(Entry, Source);
+ Sources.overrideFileContents(Entry, std::move(Source));
assert(Entry != nullptr);
return Sources.createFileID(Entry, SourceLocation(), SrcMgr::C_User);
}
diff --git a/lib/Index/USRGeneration.cpp b/lib/Index/USRGeneration.cpp
index e08b85e9f868..baa166ee2802 100644
--- a/lib/Index/USRGeneration.cpp
+++ b/lib/Index/USRGeneration.cpp
@@ -198,7 +198,9 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
return;
VisitDeclContext(D->getDeclContext());
+ bool IsTemplate = false;
if (FunctionTemplateDecl *FunTmpl = D->getDescribedFunctionTemplate()) {
+ IsTemplate = true;
Out << "@FT@";
VisitTemplateParameterList(FunTmpl->getTemplateParameters());
} else
@@ -226,12 +228,26 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
}
if (D->isVariadic())
Out << '.';
+ if (IsTemplate) {
+ // Function templates can be overloaded by return type, for example:
+ // \code
+ // template <class T> typename T::A foo() {}
+ // template <class T> typename T::B foo() {}
+ // \endcode
+ Out << '#';
+ VisitType(D->getReturnType());
+ }
Out << '#';
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
if (MD->isStatic())
Out << 'S';
if (unsigned quals = MD->getTypeQualifiers())
Out << (char)('0' + quals);
+ switch (MD->getRefQualifier()) {
+ case RQ_None: break;
+ case RQ_LValue: Out << '&'; break;
+ case RQ_RValue: Out << "&&"; break;
+ }
}
}
@@ -414,8 +430,8 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) {
switch (D->getTagKind()) {
case TTK_Interface:
+ case TTK_Class:
case TTK_Struct: Out << "@ST"; break;
- case TTK_Class: Out << "@CT"; break;
case TTK_Union: Out << "@UT"; break;
case TTK_Enum: llvm_unreachable("enum template");
}
@@ -426,8 +442,8 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) {
switch (D->getTagKind()) {
case TTK_Interface:
+ case TTK_Class:
case TTK_Struct: Out << "@SP"; break;
- case TTK_Class: Out << "@CP"; break;
case TTK_Union: Out << "@UP"; break;
case TTK_Enum: llvm_unreachable("enum partial specialization");
}
@@ -438,8 +454,8 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) {
if (!AlreadyStarted) {
switch (D->getTagKind()) {
case TTK_Interface:
+ case TTK_Class:
case TTK_Struct: Out << "@S"; break;
- case TTK_Class: Out << "@C"; break;
case TTK_Union: Out << "@U"; break;
case TTK_Enum: Out << "@E"; break;
}
@@ -455,9 +471,13 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) {
Buf[off] = 'A';
Out << '@' << *TD;
}
- else
+ else {
+ if (D->isEmbeddedInDeclarator() && !D->isFreeStanding()) {
+ printLoc(Out, D->getLocation(), Context->getSourceManager(), true);
+ } else
Buf[off] = 'a';
}
+ }
// For a class template specialization, mangle the template arguments.
if (const ClassTemplateSpecializationDecl *Spec
@@ -540,7 +560,6 @@ void USRGenerator::VisitType(QualType T) {
c = 'v'; break;
case BuiltinType::Bool:
c = 'b'; break;
- case BuiltinType::Char_U:
case BuiltinType::UChar:
c = 'c'; break;
case BuiltinType::Char16:
@@ -557,9 +576,11 @@ void USRGenerator::VisitType(QualType T) {
c = 'k'; break;
case BuiltinType::UInt128:
c = 'j'; break;
+ case BuiltinType::Char_U:
case BuiltinType::Char_S:
- case BuiltinType::SChar:
c = 'C'; break;
+ case BuiltinType::SChar:
+ c = 'r'; break;
case BuiltinType::WChar_S:
case BuiltinType::WChar_U:
c = 'W'; break;
@@ -626,6 +647,11 @@ void USRGenerator::VisitType(QualType T) {
T = PT->getPointeeType();
continue;
}
+ if (const RValueReferenceType *RT = T->getAs<RValueReferenceType>()) {
+ Out << "&&";
+ T = RT->getPointeeType();
+ continue;
+ }
if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
Out << '&';
T = RT->getPointeeType();
@@ -668,6 +694,22 @@ void USRGenerator::VisitType(QualType T) {
VisitTemplateArgument(Spec->getArg(I));
return;
}
+ if (const DependentNameType *DNT = T->getAs<DependentNameType>()) {
+ Out << '^';
+ // FIXME: Encode the qualifier, don't just print it.
+ PrintingPolicy PO(Ctx.getLangOpts());
+ PO.SuppressTagKeyword = true;
+ PO.SuppressUnwrittenScope = true;
+ PO.ConstantArraySizeAsWritten = false;
+ PO.AnonymousTagLocations = false;
+ DNT->getQualifier()->print(Out, PO);
+ Out << ':' << DNT->getIdentifier()->getName();
+ return;
+ }
+ if (const InjectedClassNameType *InjT = T->getAs<InjectedClassNameType>()) {
+ T = InjT->getInjectedSpecializationType();
+ continue;
+ }
// Unhandled type.
Out << ' ';
diff --git a/lib/Lex/HeaderMap.cpp b/lib/Lex/HeaderMap.cpp
index f6c658e11926..09d53846d4cf 100644
--- a/lib/Lex/HeaderMap.cpp
+++ b/lib/Lex/HeaderMap.cpp
@@ -81,9 +81,9 @@ const HeaderMap *HeaderMap::Create(const FileEntry *FE, FileManager &FM) {
unsigned FileSize = FE->getSize();
if (FileSize <= sizeof(HMapHeader)) return nullptr;
- std::unique_ptr<const llvm::MemoryBuffer> FileBuffer(FM.getBufferForFile(FE));
+ auto FileBuffer = FM.getBufferForFile(FE);
if (!FileBuffer) return nullptr; // Unreadable file?
- const char *FileStart = FileBuffer->getBufferStart();
+ const char *FileStart = (*FileBuffer)->getBufferStart();
// We know the file is at least as big as the header, check it now.
const HMapHeader *Header = reinterpret_cast<const HMapHeader*>(FileStart);
@@ -103,11 +103,7 @@ const HeaderMap *HeaderMap::Create(const FileEntry *FE, FileManager &FM) {
if (Header->Reserved != 0) return nullptr;
// Okay, everything looks good, create the header map.
- return new HeaderMap(FileBuffer.release(), NeedsByteSwap);
-}
-
-HeaderMap::~HeaderMap() {
- delete FileBuffer;
+ return new HeaderMap(std::move(*FileBuffer), NeedsByteSwap);
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index c12d7314ee23..d6b255fb014e 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -50,7 +50,8 @@ HeaderSearch::HeaderSearch(IntrusiveRefCntPtr<HeaderSearchOptions> HSOpts,
const LangOptions &LangOpts,
const TargetInfo *Target)
: HSOpts(HSOpts), Diags(Diags), FileMgr(SourceMgr.getFileManager()),
- FrameworkMap(64), ModMap(SourceMgr, Diags, LangOpts, Target, *this) {
+ FrameworkMap(64), ModMap(SourceMgr, Diags, LangOpts, Target, *this),
+ LangOpts(LangOpts) {
AngledDirIdx = 0;
SystemDirIdx = 0;
NoCurDirSearch = false;
@@ -60,8 +61,6 @@ HeaderSearch::HeaderSearch(IntrusiveRefCntPtr<HeaderSearchOptions> HSOpts,
NumIncluded = 0;
NumMultiIncludeFileOptzn = 0;
NumFrameworkLookups = NumSubFrameworkLookups = 0;
-
- EnabledModules = LangOpts.Modules;
}
HeaderSearch::~HeaderSearch() {
@@ -114,7 +113,9 @@ const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
}
std::string HeaderSearch::getModuleFileName(Module *Module) {
- return getModuleFileName(Module->Name, Module->ModuleMap->getName());
+ const FileEntry *ModuleMap =
+ getModuleMap().getModuleMapFileForUniquing(Module);
+ return getModuleFileName(Module->Name, ModuleMap->getName());
}
std::string HeaderSearch::getModuleFileName(StringRef ModuleName,
@@ -130,15 +131,24 @@ std::string HeaderSearch::getModuleFileName(StringRef ModuleName,
llvm::sys::path::append(Result, ModuleName + ".pcm");
} else {
// Construct the name <ModuleName>-<hash of ModuleMapPath>.pcm which should
- // be globally unique to this particular module. To avoid false-negatives
- // on case-insensitive filesystems, we use lower-case, which is safe because
- // to cause a collision the modules must have the same name, which is an
- // error if they are imported in the same translation.
- SmallString<256> AbsModuleMapPath(ModuleMapPath);
- llvm::sys::fs::make_absolute(AbsModuleMapPath);
- llvm::APInt Code(64, llvm::hash_value(AbsModuleMapPath.str().lower()));
+ // ideally be globally unique to this particular module. Name collisions
+ // in the hash are safe (because any translation unit can only import one
+ // module with each name), but result in a loss of caching.
+ //
+ // To avoid false-negatives, we form as canonical a path as we can, and map
+ // to lower-case in case we're on a case-insensitive file system.
+ auto *Dir =
+ FileMgr.getDirectory(llvm::sys::path::parent_path(ModuleMapPath));
+ if (!Dir)
+ return std::string();
+ auto DirName = FileMgr.getCanonicalName(Dir);
+ auto FileName = llvm::sys::path::filename(ModuleMapPath);
+
+ llvm::hash_code Hash =
+ llvm::hash_combine(DirName.lower(), FileName.lower());
+
SmallString<128> HashStr;
- Code.toStringUnsigned(HashStr, /*Radix*/36);
+ llvm::APInt(64, size_t(Hash)).toStringUnsigned(HashStr, /*Radix*/36);
llvm::sys::path::append(Result, ModuleName + "-" + HashStr.str() + ".pcm");
}
return Result.str().str();
@@ -147,7 +157,7 @@ std::string HeaderSearch::getModuleFileName(StringRef ModuleName,
Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch) {
// Look in the module map to determine if there is a module by this name.
Module *Module = ModMap.findModule(ModuleName);
- if (Module || !AllowSearch)
+ if (Module || !AllowSearch || !LangOpts.ModulesImplicitMaps)
return Module;
// Look through the various header search paths to load any available module
@@ -564,27 +574,9 @@ static const char *copyString(StringRef Str, llvm::BumpPtrAllocator &Alloc) {
const FileEntry *HeaderSearch::LookupFile(
StringRef Filename, SourceLocation IncludeLoc, bool isAngled,
const DirectoryLookup *FromDir, const DirectoryLookup *&CurDir,
- ArrayRef<const FileEntry *> Includers, SmallVectorImpl<char> *SearchPath,
- SmallVectorImpl<char> *RelativePath,
+ ArrayRef<std::pair<const FileEntry *, const DirectoryEntry *>> Includers,
+ SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
ModuleMap::KnownHeader *SuggestedModule, bool SkipCache) {
- if (!HSOpts->ModuleMapFiles.empty()) {
- // Preload all explicitly specified module map files. This enables modules
- // map files lying in a directory structure separate from the header files
- // that they describe. These cannot be loaded lazily upon encountering a
- // header file, as there is no other known mapping from a header file to its
- // module map file.
- for (llvm::SetVector<std::string>::iterator
- I = HSOpts->ModuleMapFiles.begin(),
- E = HSOpts->ModuleMapFiles.end();
- I != E; ++I) {
- const FileEntry *File = FileMgr.getFile(*I);
- if (!File)
- continue;
- loadModuleMapFile(File, /*IsSystem=*/false);
- }
- HSOpts->ModuleMapFiles.clear();
- }
-
if (SuggestedModule)
*SuggestedModule = ModuleMap::KnownHeader();
@@ -616,25 +608,33 @@ const FileEntry *HeaderSearch::LookupFile(
// This search is not done for <> headers.
if (!Includers.empty() && !isAngled && !NoCurDirSearch) {
SmallString<1024> TmpDir;
- for (ArrayRef<const FileEntry *>::iterator I = Includers.begin(),
- E = Includers.end();
- I != E; ++I) {
- const FileEntry *Includer = *I;
+ bool First = true;
+ for (const auto &IncluderAndDir : Includers) {
+ const FileEntry *Includer = IncluderAndDir.first;
+
// Concatenate the requested file onto the directory.
// FIXME: Portability. Filename concatenation should be in sys::Path.
- TmpDir = Includer->getDir()->getName();
+ TmpDir = IncluderAndDir.second->getName();
TmpDir.push_back('/');
TmpDir.append(Filename.begin(), Filename.end());
// FIXME: We don't cache the result of getFileInfo across the call to
// getFileAndSuggestModule, because it's a reference to an element of
// a container that could be reallocated across this call.
+ //
+ // FIXME: If we have no includer, that means we're processing a #include
+ // from a module build. We should treat this as a system header if we're
+ // building a [system] module.
bool IncluderIsSystemHeader =
- getFileInfo(Includer).DirInfo != SrcMgr::C_User;
- if (const FileEntry *FE =
- getFileAndSuggestModule(*this, TmpDir.str(), Includer->getDir(),
- IncluderIsSystemHeader,
- SuggestedModule)) {
+ Includer && getFileInfo(Includer).DirInfo != SrcMgr::C_User;
+ if (const FileEntry *FE = getFileAndSuggestModule(
+ *this, TmpDir.str(), IncluderAndDir.second,
+ IncluderIsSystemHeader, SuggestedModule)) {
+ if (!Includer) {
+ assert(First && "only first includer can have no file");
+ return FE;
+ }
+
// Leave CurDir unset.
// This file is a system header or C++ unfriendly if the old file is.
//
@@ -652,7 +652,7 @@ const FileEntry *HeaderSearch::LookupFile(
ToHFI.Framework = Framework;
if (SearchPath) {
- StringRef SearchPathRef(Includer->getDir()->getName());
+ StringRef SearchPathRef(IncluderAndDir.second->getName());
SearchPath->clear();
SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
}
@@ -660,7 +660,7 @@ const FileEntry *HeaderSearch::LookupFile(
RelativePath->clear();
RelativePath->append(Filename.begin(), Filename.end());
}
- if (I == Includers.begin())
+ if (First)
return FE;
// Otherwise, we found the path via MSVC header search rules. If
@@ -677,6 +677,7 @@ const FileEntry *HeaderSearch::LookupFile(
break;
}
}
+ First = false;
}
}
@@ -694,8 +695,7 @@ const FileEntry *HeaderSearch::LookupFile(
// multiply included, and the "pragma once" optimization prevents them from
// being relex/pp'd, but they would still have to search through a
// (potentially huge) series of SearchDirs to find it.
- LookupFileCacheInfo &CacheLookup =
- LookupFileCache.GetOrCreateValue(Filename).getValue();
+ LookupFileCacheInfo &CacheLookup = LookupFileCache[Filename];
// If the entry has been previously looked up, the first value will be
// non-zero. If the value is equal to i (the start point of our search), then
@@ -776,9 +776,9 @@ const FileEntry *HeaderSearch::LookupFile(
// a header in a framework that is currently being built, and we couldn't
// resolve "foo.h" any other way, change the include to <Foo/foo.h>, where
// "Foo" is the name of the framework in which the including header was found.
- if (!Includers.empty() && !isAngled &&
+ if (!Includers.empty() && Includers.front().first && !isAngled &&
Filename.find('/') == StringRef::npos) {
- HeaderFileInfo &IncludingHFI = getFileInfo(Includers.front());
+ HeaderFileInfo &IncludingHFI = getFileInfo(Includers.front().first);
if (IncludingHFI.IndexHeaderMapHeader) {
SmallString<128> ScratchFilename;
ScratchFilename += IncludingHFI.Framework;
@@ -795,10 +795,8 @@ const FileEntry *HeaderSearch::LookupFile(
return MSFE;
}
- LookupFileCacheInfo &CacheLookup
- = LookupFileCache.GetOrCreateValue(Filename).getValue();
- CacheLookup.HitIdx
- = LookupFileCache.GetOrCreateValue(ScratchFilename).getValue().HitIdx;
+ LookupFileCacheInfo &CacheLookup = LookupFileCache[Filename];
+ CacheLookup.HitIdx = LookupFileCache[ScratchFilename].HitIdx;
// FIXME: SuggestedModule.
return FE;
}
@@ -851,18 +849,19 @@ LookupSubframeworkHeader(StringRef Filename,
FrameworkName.append(Filename.begin(), Filename.begin()+SlashPos);
FrameworkName += ".framework/";
- llvm::StringMapEntry<FrameworkCacheEntry> &CacheLookup =
- FrameworkMap.GetOrCreateValue(Filename.substr(0, SlashPos));
+ auto &CacheLookup =
+ *FrameworkMap.insert(std::make_pair(Filename.substr(0, SlashPos),
+ FrameworkCacheEntry())).first;
// Some other location?
- if (CacheLookup.getValue().Directory &&
- CacheLookup.getKeyLength() == FrameworkName.size() &&
- memcmp(CacheLookup.getKeyData(), &FrameworkName[0],
- CacheLookup.getKeyLength()) != 0)
+ if (CacheLookup.second.Directory &&
+ CacheLookup.first().size() == FrameworkName.size() &&
+ memcmp(CacheLookup.first().data(), &FrameworkName[0],
+ CacheLookup.first().size()) != 0)
return nullptr;
// Cache subframework.
- if (!CacheLookup.getValue().Directory) {
+ if (!CacheLookup.second.Directory) {
++NumSubFrameworkLookups;
// If the framework dir doesn't exist, we fail.
@@ -871,7 +870,7 @@ LookupSubframeworkHeader(StringRef Filename,
// Otherwise, if it does, remember that this is the right direntry for this
// framework.
- CacheLookup.getValue().Directory = Dir;
+ CacheLookup.second.Directory = Dir;
}
const FileEntry *FE = nullptr;
@@ -937,28 +936,6 @@ LookupSubframeworkHeader(StringRef Filename,
return FE;
}
-/// \brief Helper static function to normalize a path for injection into
-/// a synthetic header.
-/*static*/ std::string
-HeaderSearch::NormalizeDashIncludePath(StringRef File, FileManager &FileMgr) {
- // Implicit include paths should be resolved relative to the current
- // working directory first, and then use the regular header search
- // mechanism. The proper way to handle this is to have the
- // predefines buffer located at the current working directory, but
- // it has no file entry. For now, workaround this by using an
- // absolute path if we find the file here, and otherwise letting
- // header search handle it.
- SmallString<128> Path(File);
- llvm::sys::fs::make_absolute(Path);
- bool exists;
- if (llvm::sys::fs::exists(Path.str(), exists) || !exists)
- Path = File;
- else if (exists)
- FileMgr.getFile(File);
-
- return Lexer::Stringify(Path.str());
-}
-
//===----------------------------------------------------------------------===//
// File Info Management.
//===----------------------------------------------------------------------===//
@@ -1084,13 +1061,13 @@ size_t HeaderSearch::getTotalMemory() const {
}
StringRef HeaderSearch::getUniqueFrameworkName(StringRef Framework) {
- return FrameworkNames.GetOrCreateValue(Framework).getKey();
+ return FrameworkNames.insert(Framework).first->first();
}
bool HeaderSearch::hasModuleMap(StringRef FileName,
const DirectoryEntry *Root,
bool IsSystem) {
- if (!enabledModules())
+ if (!enabledModules() || !LangOpts.ModulesImplicitMaps)
return false;
SmallVector<const DirectoryEntry *, 2> FixUpDirectories;
@@ -1108,7 +1085,9 @@ bool HeaderSearch::hasModuleMap(StringRef FileName,
return false;
// Try to load the module map file in this directory.
- switch (loadModuleMapFile(Dir, IsSystem, /*IsFramework*/false)) {
+ switch (loadModuleMapFile(Dir, IsSystem,
+ llvm::sys::path::extension(Dir->getName()) ==
+ ".framework")) {
case LMM_NewlyLoaded:
case LMM_AlreadyLoaded:
// Success. All of the directories we stepped through inherit this module
@@ -1142,11 +1121,10 @@ HeaderSearch::findModuleForHeader(const FileEntry *File) const {
return ModMap.findModuleForHeader(File);
}
-static const FileEntry *getPrivateModuleMap(StringRef ModuleMapPath,
- const DirectoryEntry *Directory,
+static const FileEntry *getPrivateModuleMap(const FileEntry *File,
FileManager &FileMgr) {
- StringRef Filename = llvm::sys::path::filename(ModuleMapPath);
- SmallString<128> PrivateFilename(Directory->getName());
+ StringRef Filename = llvm::sys::path::filename(File->getName());
+ SmallString<128> PrivateFilename(File->getDir()->getName());
if (Filename == "module.map")
llvm::sys::path::append(PrivateFilename, "module_private.map");
else if (Filename == "module.modulemap")
@@ -1157,7 +1135,25 @@ static const FileEntry *getPrivateModuleMap(StringRef ModuleMapPath,
}
bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem) {
- switch (loadModuleMapFileImpl(File, IsSystem)) {
+ // Find the directory for the module. For frameworks, that may require going
+ // up from the 'Modules' directory.
+ const DirectoryEntry *Dir = nullptr;
+ if (getHeaderSearchOpts().ModuleMapFileHomeIsCwd)
+ Dir = FileMgr.getDirectory(".");
+ else {
+ Dir = File->getDir();
+ StringRef DirName(Dir->getName());
+ if (llvm::sys::path::filename(DirName) == "Modules") {
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.endswith(".framework"))
+ Dir = FileMgr.getDirectory(DirName);
+ // FIXME: This assert can fail if there's a race between the above check
+ // and the removal of the directory.
+ assert(Dir && "parent must exist");
+ }
+ }
+
+ switch (loadModuleMapFileImpl(File, IsSystem, Dir)) {
case LMM_AlreadyLoaded:
case LMM_NewlyLoaded:
return false;
@@ -1169,35 +1165,37 @@ bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem) {
}
HeaderSearch::LoadModuleMapResult
-HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem) {
+HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem,
+ const DirectoryEntry *Dir) {
assert(File && "expected FileEntry");
- const DirectoryEntry *Dir = File->getDir();
- auto KnownDir = DirectoryHasModuleMap.find(Dir);
- if (KnownDir != DirectoryHasModuleMap.end())
- return KnownDir->second ? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
+ // Check whether we've already loaded this module map, and mark it as being
+ // loaded in case we recursively try to load it from itself.
+ auto AddResult = LoadedModuleMaps.insert(std::make_pair(File, true));
+ if (!AddResult.second)
+ return AddResult.first->second ? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
- if (ModMap.parseModuleMapFile(File, IsSystem)) {
- DirectoryHasModuleMap[Dir] = false;
+ if (ModMap.parseModuleMapFile(File, IsSystem, Dir)) {
+ LoadedModuleMaps[File] = false;
return LMM_InvalidModuleMap;
}
// Try to load a corresponding private module map.
- if (const FileEntry *PMMFile =
- getPrivateModuleMap(File->getName(), Dir, FileMgr)) {
- if (ModMap.parseModuleMapFile(PMMFile, IsSystem)) {
- DirectoryHasModuleMap[Dir] = false;
+ if (const FileEntry *PMMFile = getPrivateModuleMap(File, FileMgr)) {
+ if (ModMap.parseModuleMapFile(PMMFile, IsSystem, Dir)) {
+ LoadedModuleMaps[File] = false;
return LMM_InvalidModuleMap;
}
}
// This directory has a module map.
- DirectoryHasModuleMap[Dir] = true;
return LMM_NewlyLoaded;
}
const FileEntry *
HeaderSearch::lookupModuleMapFile(const DirectoryEntry *Dir, bool IsFramework) {
+ if (!LangOpts.ModulesImplicitMaps)
+ return nullptr;
// For frameworks, the preferred spelling is Modules/module.modulemap, but
// module.map at the framework root is also accepted.
SmallString<128> ModuleMapFileName(Dir->getName());
@@ -1218,12 +1216,12 @@ Module *HeaderSearch::loadFrameworkModule(StringRef Name,
bool IsSystem) {
if (Module *Module = ModMap.findModule(Name))
return Module;
-
+
// Try to load a module map file.
switch (loadModuleMapFile(Dir, IsSystem, /*IsFramework*/true)) {
case LMM_InvalidModuleMap:
break;
-
+
case LMM_AlreadyLoaded:
case LMM_NoDirectory:
return nullptr;
@@ -1234,7 +1232,10 @@ Module *HeaderSearch::loadFrameworkModule(StringRef Name,
// Try to infer a module map from the framework directory.
- return ModMap.inferFrameworkModule(Name, Dir, IsSystem, /*Parent=*/nullptr);
+ if (LangOpts.ModulesImplicitMaps)
+ return ModMap.inferFrameworkModule(Name, Dir, IsSystem, /*Parent=*/nullptr);
+
+ return nullptr;
}
@@ -1252,15 +1253,18 @@ HeaderSearch::loadModuleMapFile(const DirectoryEntry *Dir, bool IsSystem,
bool IsFramework) {
auto KnownDir = DirectoryHasModuleMap.find(Dir);
if (KnownDir != DirectoryHasModuleMap.end())
- return KnownDir->second? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
+ return KnownDir->second ? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
if (const FileEntry *ModuleMapFile = lookupModuleMapFile(Dir, IsFramework)) {
- LoadModuleMapResult Result = loadModuleMapFileImpl(ModuleMapFile, IsSystem);
+ LoadModuleMapResult Result =
+ loadModuleMapFileImpl(ModuleMapFile, IsSystem, Dir);
// Add Dir explicitly in case ModuleMapFile is in a subdirectory.
// E.g. Foo.framework/Modules/module.modulemap
// ^Dir ^ModuleMapFile
if (Result == LMM_NewlyLoaded)
DirectoryHasModuleMap[Dir] = true;
+ else if (Result == LMM_InvalidModuleMap)
+ DirectoryHasModuleMap[Dir] = false;
return Result;
}
return LMM_InvalidModuleMap;
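
The two hunks above replace the per-directory flag with a per-file LoadedModuleMaps cache and make the directory cache remember failures as well as successes. A minimal sketch of that pattern, using std::map in place of llvm::DenseMap and an invented ParseFails flag in place of the real parser:

// Insert an optimistic entry before parsing so a recursive attempt to load
// the same module map sees it as already loaded, and cache negative results
// so a bad file is not reparsed. Names and types here are illustrative.
#include <iostream>
#include <map>
#include <string>

enum LoadResult { NewlyLoaded, AlreadyLoaded, Invalid };

static std::map<std::string, bool> LoadedMaps;   // file -> loaded OK?

static LoadResult loadModuleMap(const std::string &File, bool ParseFails) {
  // Mark it as being loaded up front, guarding against recursion.
  auto Ins = LoadedMaps.insert({File, true});
  if (!Ins.second)
    return Ins.first->second ? AlreadyLoaded : Invalid;
  if (ParseFails) {                              // failures are cached too
    LoadedMaps[File] = false;
    return Invalid;
  }
  return NewlyLoaded;
}

int main() {
  std::cout << loadModuleMap("a/module.modulemap", false) << "\n";  // 0: new
  std::cout << loadModuleMap("a/module.modulemap", false) << "\n";  // 1: cached
  std::cout << loadModuleMap("b/module.modulemap", true)  << "\n";  // 2: invalid
  std::cout << loadModuleMap("b/module.modulemap", true)  << "\n";  // 2: cached
  return 0;
}
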
@@ -1268,45 +1272,49 @@ HeaderSearch::loadModuleMapFile(const DirectoryEntry *Dir, bool IsSystem,
void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
Modules.clear();
-
- // Load module maps for each of the header search directories.
- for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
- bool IsSystem = SearchDirs[Idx].isSystemHeaderDirectory();
- if (SearchDirs[Idx].isFramework()) {
- std::error_code EC;
- SmallString<128> DirNative;
- llvm::sys::path::native(SearchDirs[Idx].getFrameworkDir()->getName(),
- DirNative);
-
- // Search each of the ".framework" directories to load them as modules.
- for (llvm::sys::fs::directory_iterator Dir(DirNative.str(), EC), DirEnd;
- Dir != DirEnd && !EC; Dir.increment(EC)) {
- if (llvm::sys::path::extension(Dir->path()) != ".framework")
- continue;
-
- const DirectoryEntry *FrameworkDir = FileMgr.getDirectory(Dir->path());
- if (!FrameworkDir)
- continue;
-
- // Load this framework module.
- loadFrameworkModule(llvm::sys::path::stem(Dir->path()), FrameworkDir,
- IsSystem);
+
+ if (LangOpts.ModulesImplicitMaps) {
+ // Load module maps for each of the header search directories.
+ for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
+ bool IsSystem = SearchDirs[Idx].isSystemHeaderDirectory();
+ if (SearchDirs[Idx].isFramework()) {
+ std::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(SearchDirs[Idx].getFrameworkDir()->getName(),
+ DirNative);
+
+ // Search each of the ".framework" directories to load them as modules.
+ for (llvm::sys::fs::directory_iterator Dir(DirNative.str(), EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ if (llvm::sys::path::extension(Dir->path()) != ".framework")
+ continue;
+
+ const DirectoryEntry *FrameworkDir =
+ FileMgr.getDirectory(Dir->path());
+ if (!FrameworkDir)
+ continue;
+
+ // Load this framework module.
+ loadFrameworkModule(llvm::sys::path::stem(Dir->path()), FrameworkDir,
+ IsSystem);
+ }
+ continue;
}
- continue;
+
+ // FIXME: Deal with header maps.
+ if (SearchDirs[Idx].isHeaderMap())
+ continue;
+
+ // Try to load a module map file for the search directory.
+ loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem,
+ /*IsFramework*/ false);
+
+ // Try to load module map files for immediate subdirectories of this
+ // search directory.
+ loadSubdirectoryModuleMaps(SearchDirs[Idx]);
}
-
- // FIXME: Deal with header maps.
- if (SearchDirs[Idx].isHeaderMap())
- continue;
-
- // Try to load a module map file for the search directory.
- loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem, /*IsFramework*/false);
-
- // Try to load module map files for immediate subdirectories of this search
- // directory.
- loadSubdirectoryModuleMaps(SearchDirs[Idx]);
}
-
+
// Populate the list of modules.
for (ModuleMap::module_iterator M = ModMap.module_begin(),
MEnd = ModMap.module_end();
@@ -1316,6 +1324,9 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
}
void HeaderSearch::loadTopLevelSystemModules() {
+ if (!LangOpts.ModulesImplicitMaps)
+ return;
+
// Load module maps for each of the header search directories.
for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
// We only care about normal header directories.
@@ -1331,6 +1342,9 @@ void HeaderSearch::loadTopLevelSystemModules() {
}
void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
+ assert(LangOpts.ModulesImplicitMaps &&
+ "Should not be loading subdirectory module maps");
+
if (SearchDir.haveSearchedAllModuleMaps())
return;
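
Several hunks in this file replace StringMap::GetOrCreateValue() with operator[] or insert(). A self-contained sketch of the get-or-create idiom they migrate to, shown with std::map and an invented CacheEntry type rather than the real LookupFileCacheInfo:

// operator[] default-constructs the cached entry on first use, so no
// separate "create if missing" call is needed.
#include <iostream>
#include <map>
#include <string>

struct CacheEntry {
  unsigned HitIdx = 0;    // index of the search dir that satisfied the lookup
  bool Resolved = false;
};

int main() {
  std::map<std::string, CacheEntry> LookupCache;

  // First access default-constructs the entry; later accesses reuse it.
  CacheEntry &E = LookupCache["foo.h"];
  if (!E.Resolved) {
    E.HitIdx = 3;         // pretend search dir #3 provided the header
    E.Resolved = true;
  }

  std::cout << "foo.h cached at dir " << LookupCache["foo.h"].HitIdx << "\n";
  return 0;
}
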
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index 6f6b50b246d7..ca5252e1c9ce 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -540,16 +540,16 @@ namespace {
};
}
-std::pair<unsigned, bool>
-Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer,
- const LangOptions &LangOpts, unsigned MaxLines) {
+std::pair<unsigned, bool> Lexer::ComputePreamble(StringRef Buffer,
+ const LangOptions &LangOpts,
+ unsigned MaxLines) {
// Create a lexer starting at the beginning of the file. Note that we use a
// "fake" file source location at offset 1 so that the lexer will track our
// position within the file.
const unsigned StartOffset = 1;
SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
- Lexer TheLexer(FileLoc, LangOpts, Buffer->getBufferStart(),
- Buffer->getBufferStart(), Buffer->getBufferEnd());
+ Lexer TheLexer(FileLoc, LangOpts, Buffer.begin(), Buffer.begin(),
+ Buffer.end());
TheLexer.SetCommentRetentionState(true);
// StartLoc will differ from FileLoc if there is a BOM that was skipped.
@@ -563,9 +563,9 @@ Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer,
unsigned MaxLineOffset = 0;
if (MaxLines) {
- const char *CurPtr = Buffer->getBufferStart();
+ const char *CurPtr = Buffer.begin();
unsigned CurLine = 0;
- while (CurPtr != Buffer->getBufferEnd()) {
+ while (CurPtr != Buffer.end()) {
char ch = *CurPtr++;
if (ch == '\n') {
++CurLine;
@@ -573,8 +573,8 @@ Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer,
break;
}
}
- if (CurPtr != Buffer->getBufferEnd())
- MaxLineOffset = CurPtr - Buffer->getBufferStart();
+ if (CurPtr != Buffer.end())
+ MaxLineOffset = CurPtr - Buffer.begin();
}
do {
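
ComputePreamble now takes the buffer contents as a StringRef, so the scan walks begin()/end() instead of getBufferStart()/getBufferEnd(). The stand-alone sketch below reproduces the line-offset loop with std::string_view as a stand-in for StringRef; it is an illustration, not clang's implementation:

#include <iostream>
#include <string_view>

// Returns the offset just past the MaxLines-th newline (or the end of the
// buffer if there are fewer lines), mirroring the loop in the hunk above.
static unsigned preambleLineOffset(std::string_view Buffer, unsigned MaxLines) {
  const char *CurPtr = Buffer.data();
  const char *End = Buffer.data() + Buffer.size();
  unsigned CurLine = 0;
  while (CurPtr != End) {
    if (*CurPtr++ == '\n' && ++CurLine == MaxLines)
      break;
  }
  return static_cast<unsigned>(CurPtr - Buffer.data());
}

int main() {
  std::cout << preambleLineOffset("#include <a>\n#include <b>\nint x;\n", 2)
            << "\n";   // prints 26: offset just past the second newline
  return 0;
}
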
@@ -1597,7 +1597,7 @@ bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
}
// If we have a digit separator, continue.
- if (C == '\'' && getLangOpts().CPlusPlus1y) {
+ if (C == '\'' && getLangOpts().CPlusPlus14) {
unsigned NextSize;
char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, getLangOpts());
if (isIdentifierBody(Next)) {
@@ -1660,7 +1660,7 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
bool IsUDSuffix = false;
if (C == '_')
IsUDSuffix = true;
- else if (IsStringLiteral && getLangOpts().CPlusPlus1y) {
+ else if (IsStringLiteral && getLangOpts().CPlusPlus14) {
// In C++1y, we need to look ahead a few characters to see if this is a
// valid suffix for a string literal or a numeric literal (this could be
// the 'operator""if' defining a numeric literal operator).
@@ -1889,17 +1889,20 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
/// LexCharConstant - Lex the remainder of a character constant, after having
-/// lexed either ' or L' or u' or U'.
+/// lexed either ' or L' or u8' or u' or U'.
bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
tok::TokenKind Kind) {
// Does this character contain the \0 character?
const char *NulCharacter = nullptr;
- if (!isLexingRawMode() &&
- (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant))
- Diag(BufferPtr, getLangOpts().CPlusPlus
- ? diag::warn_cxx98_compat_unicode_literal
- : diag::warn_c99_compat_unicode_literal);
+ if (!isLexingRawMode()) {
+ if (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant)
+ Diag(BufferPtr, getLangOpts().CPlusPlus
+ ? diag::warn_cxx98_compat_unicode_literal
+ : diag::warn_c99_compat_unicode_literal);
+ else if (Kind == tok::utf8_char_constant)
+ Diag(BufferPtr, diag::warn_cxx14_compat_u8_character_literal);
+ }
char C = getAndAdvanceChar(CurPtr, Result);
if (C == '\'') {
@@ -2319,7 +2322,7 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
'/', '/', '/', '/', '/', '/', '/', '/'
};
while (CurPtr+16 <= BufferEnd &&
- !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes))
+ !vec_any_eq(*(const vector unsigned char*)CurPtr, Slashes))
CurPtr += 16;
#else
// Scan for '/' quickly. Many block comments are very large.
@@ -2585,8 +2588,8 @@ static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
size_t Pos = RestOfBuffer.find(Terminator);
while (Pos != StringRef::npos) {
// Must occur at start of line.
- if (RestOfBuffer[Pos-1] != '\r' &&
- RestOfBuffer[Pos-1] != '\n') {
+ if (Pos == 0 ||
+ (RestOfBuffer[Pos - 1] != '\r' && RestOfBuffer[Pos - 1] != '\n')) {
RestOfBuffer = RestOfBuffer.substr(Pos+TermLen);
Pos = RestOfBuffer.find(Terminator);
continue;
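
The conflict-marker fix above guards the read of the character before the match: when the terminator is found at offset 0 there is no previous character to inspect. A tiny self-contained version of the corrected check (names are illustrative, not clang's):

#include <iostream>
#include <string>

// Mirrors the fixed condition: the candidate marker is skipped unless the
// previous character exists and is a line break. Pos == 0 previously read
// SubBuf[-1]; the added check avoids that out-of-bounds access.
static bool skipMatch(const std::string &SubBuf, size_t Pos) {
  return Pos == 0 || (SubBuf[Pos - 1] != '\r' && SubBuf[Pos - 1] != '\n');
}

int main() {
  std::string Buf = ">>>>>>> at offset zero\n>>>>>>> after newline";
  std::cout << skipMatch(Buf, 0) << "\n";                    // 1: skipped
  std::cout << skipMatch(Buf, Buf.find('\n') + 1) << "\n";   // 0: kept
  return 0;
}
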
@@ -3068,6 +3071,11 @@ LexNextToken:
ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result),
tok::utf8_string_literal);
+ if (Char2 == '\'' && LangOpts.CPlusPlus1z)
+ return LexCharConstant(
+ Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ tok::utf8_char_constant);
if (Char2 == 'R' && LangOpts.CPlusPlus11) {
unsigned SizeTmp3;
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index 6417d0f0f5f2..03331fb33eb2 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -28,6 +28,7 @@ static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target) {
default: llvm_unreachable("Unknown token type!");
case tok::char_constant:
case tok::string_literal:
+ case tok::utf8_char_constant:
case tok::utf8_string_literal:
return Target.getCharWidth();
case tok::wide_char_constant:
@@ -656,7 +657,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
}
}
// "i", "if", and "il" are user-defined suffixes in C++1y.
- if (PP.getLangOpts().CPlusPlus1y && *s == 'i')
+ if (PP.getLangOpts().CPlusPlus14 && *s == 'i')
break;
// fall through.
case 'j':
@@ -716,7 +717,7 @@ bool NumericLiteralParser::isValidUDSuffix(const LangOptions &LangOpts,
return true;
// In C++11, there are no library suffixes.
- if (!LangOpts.CPlusPlus1y)
+ if (!LangOpts.CPlusPlus14)
return false;
// In C++1y, "s", "h", "min", "ms", "us", and "ns" are used in the library.
@@ -813,10 +814,10 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
if ((c1 == 'b' || c1 == 'B') && (c2 == '0' || c2 == '1')) {
// 0b101010 is a C++1y / GCC extension.
PP.Diag(TokLoc,
- PP.getLangOpts().CPlusPlus1y
+ PP.getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_binary_literal
: PP.getLangOpts().CPlusPlus
- ? diag::ext_binary_literal_cxx1y
+ ? diag::ext_binary_literal_cxx14
: diag::ext_binary_literal);
++s;
radix = 2;
@@ -1031,9 +1032,10 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
const char *TokBegin = begin;
// Skip over wide character determinant.
- if (Kind != tok::char_constant) {
+ if (Kind != tok::char_constant)
+ ++begin;
+ if (Kind == tok::utf8_char_constant)
++begin;
- }
// Skip over the entry quote.
assert(begin[0] == '\'' && "Invalid token lexed");
@@ -1077,6 +1079,8 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
if (tok::wide_char_constant == Kind) {
largest_character_for_kind =
0xFFFFFFFFu >> (32-PP.getTargetInfo().getWCharWidth());
+ } else if (tok::utf8_char_constant == Kind) {
+ largest_character_for_kind = 0x7F;
} else if (tok::utf16_char_constant == Kind) {
largest_character_for_kind = 0xFFFF;
} else if (tok::utf32_char_constant == Kind) {
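
For the new u8'x' literals, the parser skips a two-character prefix and caps the value at 0x7F. The sketch below shows the per-kind maximum as a stand-alone function; only the u8, wide and UTF-16 values are taken from the hunk, the remaining values and the enum itself are illustrative stand-ins:

#include <cstdint>
#include <iostream>

enum class CharKind { Plain, Wide, Utf8, Utf16, Utf32 };

static uint32_t largestCharacterForKind(CharKind K, unsigned WCharBits = 32) {
  switch (K) {
  case CharKind::Plain: return 0xFF;                         // illustrative
  case CharKind::Wide:  return 0xFFFFFFFFu >> (32 - WCharBits);
  case CharKind::Utf8:  return 0x7F;      // added in the diff: u8'x' is ASCII-only
  case CharKind::Utf16: return 0xFFFF;
  case CharKind::Utf32: return 0x10FFFF;                     // illustrative
  }
  return 0;
}

int main() {
  std::cout << std::hex << largestCharacterForKind(CharKind::Utf8) << "\n"; // 7f
  return 0;
}
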
diff --git a/lib/Lex/MacroArgs.cpp b/lib/Lex/MacroArgs.cpp
index a746fb7f48bf..9967f3f0e493 100644
--- a/lib/Lex/MacroArgs.cpp
+++ b/lib/Lex/MacroArgs.cpp
@@ -218,6 +218,7 @@ Token MacroArgs::StringifyArgument(const Token *ArgToks,
if (tok::isStringLiteral(Tok.getKind()) || // "foo", u8R"x(foo)x"_bar, etc.
Tok.is(tok::char_constant) || // 'x'
Tok.is(tok::wide_char_constant) || // L'x'.
+ Tok.is(tok::utf8_char_constant) || // u8'x'.
Tok.is(tok::utf16_char_constant) || // u'x'.
Tok.is(tok::utf32_char_constant)) { // U'x'.
bool Invalid = false;
@@ -233,14 +234,14 @@ Token MacroArgs::StringifyArgument(const Token *ArgToks,
// in place and avoid copies where possible.
unsigned CurStrLen = Result.size();
Result.resize(CurStrLen+Tok.getLength());
- const char *BufPtr = &Result[CurStrLen];
+ const char *BufPtr = Result.data() + CurStrLen;
bool Invalid = false;
unsigned ActualTokLen = PP.getSpelling(Tok, BufPtr, &Invalid);
if (!Invalid) {
// If getSpelling returned a pointer to an already uniqued version of
// the string instead of filling in BufPtr, memcpy it onto our string.
- if (BufPtr != &Result[CurStrLen])
+ if (ActualTokLen && BufPtr != &Result[CurStrLen])
memcpy(&Result[CurStrLen], BufPtr, ActualTokLen);
// If the token was dirty, the spelling may be shorter than the token.
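
The MacroArgs change takes the scratch pointer via data() + offset instead of operator[] and skips the memcpy when the spelling length is zero. A hedged stand-alone illustration of that defensive pattern (not clang's code):

#include <cstring>
#include <iostream>
#include <string>

// data() + offset is valid even for an empty region, and a zero-length
// spelling never reaches memcpy at all.
static void appendSpelling(std::string &Result, const char *Spelling,
                           size_t Len) {
  size_t CurStrLen = Result.size();
  Result.resize(CurStrLen + Len);
  char *BufPtr = Result.data() + CurStrLen;
  if (Len && BufPtr != Spelling)
    std::memcpy(BufPtr, Spelling, Len);
}

int main() {
  std::string Out = "#";
  const char Tok[] = "ident";
  appendSpelling(Out, Tok, sizeof(Tok) - 1);
  appendSpelling(Out, Tok, 0);                // no-op, no out-of-range access
  std::cout << Out << "\n";                   // "#ident"
  return 0;
}
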
diff --git a/lib/Lex/ModuleMap.cpp b/lib/Lex/ModuleMap.cpp
index 8fae9c933d33..ef322d8cdc4c 100644
--- a/lib/Lex/ModuleMap.cpp
+++ b/lib/Lex/ModuleMap.cpp
@@ -19,6 +19,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/LiteralSupport.h"
@@ -202,7 +203,7 @@ ModuleMap::findHeaderInUmbrellaDirs(const FileEntry *File,
return KnownHeader();
}
-// Returns 'true' if 'RequestingModule directly uses 'RequestedModule'.
+// Returns true if RequestingModule directly uses RequestedModule.
static bool directlyUses(const Module *RequestingModule,
const Module *RequestedModule) {
return std::find(RequestingModule->DirectUses.begin(),
@@ -214,19 +215,23 @@ static bool violatesPrivateInclude(Module *RequestingModule,
const FileEntry *IncFileEnt,
ModuleMap::ModuleHeaderRole Role,
Module *RequestedModule) {
- #ifndef NDEBUG
+ bool IsPrivateRole = Role & ModuleMap::PrivateHeader;
+#ifndef NDEBUG
// Check for consistency between the module header role
// as obtained from the lookup and as obtained from the module.
// This check is not cheap, so enable it only for debugging.
- SmallVectorImpl<const FileEntry *> &PvtHdrs
- = RequestedModule->PrivateHeaders;
- SmallVectorImpl<const FileEntry *>::iterator Look
- = std::find(PvtHdrs.begin(), PvtHdrs.end(), IncFileEnt);
- bool IsPrivate = Look != PvtHdrs.end();
- assert((IsPrivate && Role == ModuleMap::PrivateHeader)
- || (!IsPrivate && Role != ModuleMap::PrivateHeader));
- #endif
- return Role == ModuleMap::PrivateHeader &&
+ bool IsPrivate = false;
+ SmallVectorImpl<Module::Header> *HeaderList[] =
+ {&RequestedModule->Headers[Module::HK_Private],
+ &RequestedModule->Headers[Module::HK_PrivateTextual]};
+ for (auto *Hdrs : HeaderList)
+ IsPrivate |=
+ std::find_if(Hdrs->begin(), Hdrs->end(), [&](const Module::Header &H) {
+ return H.Entry == IncFileEnt;
+ }) != Hdrs->end();
+ assert(IsPrivate == IsPrivateRole && "inconsistent headers and roles");
+#endif
+ return IsPrivateRole &&
RequestedModule->getTopLevelModule() != RequestingModule;
}
@@ -253,12 +258,6 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
HeadersMap::iterator Known = findKnownHeader(File);
if (Known != Headers.end()) {
for (const KnownHeader &Header : Known->second) {
- // Excluded headers don't really belong to a module.
- if (Header.getRole() == ModuleMap::ExcludedHeader) {
- Excluded = true;
- continue;
- }
-
// If 'File' is part of 'RequestingModule' we can definitely include it.
if (Header.getModule() == RequestingModule)
return;
@@ -281,6 +280,8 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
// We have found a module that we can happily use.
return;
}
+
+ Excluded = true;
}
// We have found a header, but it is private.
@@ -315,20 +316,23 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
ModuleMap::KnownHeader
ModuleMap::findModuleForHeader(const FileEntry *File,
- Module *RequestingModule) {
+ Module *RequestingModule,
+ bool IncludeTextualHeaders) {
HeadersMap::iterator Known = findKnownHeader(File);
+ auto MakeResult = [&](ModuleMap::KnownHeader R) -> ModuleMap::KnownHeader {
+ if (!IncludeTextualHeaders && (R.getRole() & ModuleMap::TextualHeader))
+ return ModuleMap::KnownHeader();
+ return R;
+ };
+
if (Known != Headers.end()) {
- ModuleMap::KnownHeader Result = KnownHeader();
+ ModuleMap::KnownHeader Result;
// Iterate over all modules that 'File' is part of to find the best fit.
for (SmallVectorImpl<KnownHeader>::iterator I = Known->second.begin(),
E = Known->second.end();
I != E; ++I) {
- // Cannot use a module if the header is excluded in it.
- if (I->getRole() == ModuleMap::ExcludedHeader)
- continue;
-
// Cannot use a module if it is unavailable.
if (!I->getModule()->isAvailable())
continue;
@@ -336,7 +340,7 @@ ModuleMap::findModuleForHeader(const FileEntry *File,
// If 'File' is part of 'RequestingModule', 'RequestingModule' is the
// module we are looking for.
if (I->getModule() == RequestingModule)
- return *I;
+ return MakeResult(*I);
// If uses need to be specified explicitly, we are only allowed to return
// modules that are explicitly used by the requesting module.
@@ -344,15 +348,11 @@ ModuleMap::findModuleForHeader(const FileEntry *File,
!directlyUses(RequestingModule, I->getModule()))
continue;
- Result = *I;
- // If 'File' is a public header of this module, this is as good as we
- // are going to get.
- // FIXME: If we have a RequestingModule, we should prefer the header from
- // that module.
- if (I->getRole() == ModuleMap::NormalHeader)
- break;
+ // Prefer a public header over a private header.
+ if (!Result || (Result.getRole() & ModuleMap::PrivateHeader))
+ Result = *I;
}
- return Result;
+ return MakeResult(Result);
}
SmallVector<const DirectoryEntry *, 2> SkippedDirs;
@@ -367,6 +367,9 @@ ModuleMap::findModuleForHeader(const FileEntry *File,
UmbrellaModule = UmbrellaModule->Parent;
if (UmbrellaModule->InferSubmodules) {
+ const FileEntry *UmbrellaModuleMap =
+ getModuleMapFileForUniquing(UmbrellaModule);
+
// Infer submodules for each of the directories we found between
// the directory of the umbrella header and the directory where
// the actual header is located.
@@ -377,8 +380,9 @@ ModuleMap::findModuleForHeader(const FileEntry *File,
SmallString<32> NameBuf;
StringRef Name = sanitizeFilenameAsIdentifier(
llvm::sys::path::stem(SkippedDirs[I-1]->getName()), NameBuf);
- Result = findOrCreateModule(Name, Result, UmbrellaModule->ModuleMap,
- /*IsFramework=*/false, Explicit).first;
+ Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
+ Explicit).first;
+ InferredModuleAllowedBy[Result] = UmbrellaModuleMap;
Result->IsInferred = true;
// Associate the module and the directory.
@@ -394,8 +398,9 @@ ModuleMap::findModuleForHeader(const FileEntry *File,
SmallString<32> NameBuf;
StringRef Name = sanitizeFilenameAsIdentifier(
llvm::sys::path::stem(File->getName()), NameBuf);
- Result = findOrCreateModule(Name, Result, UmbrellaModule->ModuleMap,
- /*IsFramework=*/false, Explicit).first;
+ Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
+ Explicit).first;
+ InferredModuleAllowedBy[Result] = UmbrellaModuleMap;
Result->IsInferred = true;
Result->addTopHeader(File);
@@ -417,9 +422,9 @@ ModuleMap::findModuleForHeader(const FileEntry *File,
if (!Result->isAvailable())
return KnownHeader();
- return Headers[File].back();
+ return MakeResult(Headers[File].back());
}
-
+
return KnownHeader();
}
@@ -535,15 +540,14 @@ Module *ModuleMap::lookupModuleQualified(StringRef Name, Module *Context) const{
}
std::pair<Module *, bool>
-ModuleMap::findOrCreateModule(StringRef Name, Module *Parent,
- const FileEntry *ModuleMap, bool IsFramework,
+ModuleMap::findOrCreateModule(StringRef Name, Module *Parent, bool IsFramework,
bool IsExplicit) {
// Try to find an existing module with this name.
if (Module *Sub = lookupModuleQualified(Name, Parent))
return std::make_pair(Sub, false);
// Create a new module with this name.
- Module *Result = new Module(Name, SourceLocation(), Parent, ModuleMap,
+ Module *Result = new Module(Name, SourceLocation(), Parent,
IsFramework, IsExplicit);
if (LangOpts.CurrentModule == Name) {
SourceModule = Result;
@@ -559,30 +563,6 @@ ModuleMap::findOrCreateModule(StringRef Name, Module *Parent,
return std::make_pair(Result, true);
}
-bool ModuleMap::canInferFrameworkModule(const DirectoryEntry *ParentDir,
- StringRef Name, bool &IsSystem) const {
- // Check whether we have already looked into the parent directory
- // for a module map.
- llvm::DenseMap<const DirectoryEntry *, InferredDirectory>::const_iterator
- inferred = InferredDirectories.find(ParentDir);
- if (inferred == InferredDirectories.end())
- return false;
-
- if (!inferred->second.InferModules)
- return false;
-
- // We're allowed to infer for this directory, but make sure it's okay
- // to infer this particular module.
- bool canInfer = std::find(inferred->second.ExcludedModules.begin(),
- inferred->second.ExcludedModules.end(),
- Name) == inferred->second.ExcludedModules.end();
-
- if (canInfer && inferred->second.InferSystemModules)
- IsSystem = true;
-
- return canInfer;
-}
-
/// \brief For a framework module, infer the framework against which we
/// should link.
static void inferFrameworkLink(Module *Mod, const DirectoryEntry *FrameworkDir,
@@ -605,6 +585,15 @@ ModuleMap::inferFrameworkModule(StringRef ModuleName,
const DirectoryEntry *FrameworkDir,
bool IsSystem,
Module *Parent) {
+ Attributes Attrs;
+ Attrs.IsSystem = IsSystem;
+ return inferFrameworkModule(ModuleName, FrameworkDir, Attrs, Parent);
+}
+
+Module *ModuleMap::inferFrameworkModule(StringRef ModuleName,
+ const DirectoryEntry *FrameworkDir,
+ Attributes Attrs, Module *Parent) {
+
// Check whether we've already found this module.
if (Module *Mod = lookupModuleQualified(ModuleName, Parent))
return Mod;
@@ -645,7 +634,7 @@ ModuleMap::inferFrameworkModule(StringRef ModuleName,
bool IsFrameworkDir = Parent.endswith(".framework");
if (const FileEntry *ModMapFile =
HeaderInfo.lookupModuleMapFile(ParentDir, IsFrameworkDir)) {
- parseModuleMapFile(ModMapFile, IsSystem);
+ parseModuleMapFile(ModMapFile, Attrs.IsSystem, ParentDir);
inferred = InferredDirectories.find(ParentDir);
}
@@ -662,8 +651,9 @@ ModuleMap::inferFrameworkModule(StringRef ModuleName,
inferred->second.ExcludedModules.end(),
Name) == inferred->second.ExcludedModules.end();
- if (inferred->second.InferSystemModules)
- IsSystem = true;
+ Attrs.IsSystem |= inferred->second.Attrs.IsSystem;
+ Attrs.IsExternC |= inferred->second.Attrs.IsExternC;
+ Attrs.IsExhaustive |= inferred->second.Attrs.IsExhaustive;
ModuleMapFile = inferred->second.ModuleMapFile;
}
}
@@ -673,7 +663,7 @@ ModuleMap::inferFrameworkModule(StringRef ModuleName,
if (!canInfer)
return nullptr;
} else
- ModuleMapFile = Parent->ModuleMap;
+ ModuleMapFile = getModuleMapFileForUniquing(Parent);
// Look for an umbrella header.
@@ -687,15 +677,19 @@ ModuleMap::inferFrameworkModule(StringRef ModuleName,
if (!UmbrellaHeader)
return nullptr;
- Module *Result = new Module(ModuleName, SourceLocation(), Parent, ModuleMapFile,
+ Module *Result = new Module(ModuleName, SourceLocation(), Parent,
/*IsFramework=*/true, /*IsExplicit=*/false);
+ InferredModuleAllowedBy[Result] = ModuleMapFile;
+ Result->IsInferred = true;
if (LangOpts.CurrentModule == ModuleName) {
SourceModule = Result;
SourceModuleName = ModuleName;
}
- if (IsSystem)
- Result->IsSystem = IsSystem;
-
+
+ Result->IsSystem |= Attrs.IsSystem;
+ Result->IsExternC |= Attrs.IsExternC;
+ Result->ConfigMacrosExhaustive |= Attrs.IsExhaustive;
+
if (!Parent)
Modules[ModuleName] = Result;
@@ -750,8 +744,8 @@ ModuleMap::inferFrameworkModule(StringRef ModuleName,
// FIXME: Do we want to warn about subframeworks without umbrella headers?
SmallString<32> NameBuf;
inferFrameworkModule(sanitizeFilenameAsIdentifier(
- llvm::sys::path::stem(Dir->path()), NameBuf),
- SubframeworkDir, IsSystem, Result);
+ llvm::sys::path::stem(Dir->path()), NameBuf),
+ SubframeworkDir, Attrs, Result);
}
}
@@ -775,23 +769,44 @@ void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir) {
UmbrellaDirs[UmbrellaDir] = Mod;
}
-void ModuleMap::addHeader(Module *Mod, const FileEntry *Header,
+static Module::HeaderKind headerRoleToKind(ModuleMap::ModuleHeaderRole Role) {
+ switch ((int)Role) {
+ default: llvm_unreachable("unknown header role");
+ case ModuleMap::NormalHeader:
+ return Module::HK_Normal;
+ case ModuleMap::PrivateHeader:
+ return Module::HK_Private;
+ case ModuleMap::TextualHeader:
+ return Module::HK_Textual;
+ case ModuleMap::PrivateHeader | ModuleMap::TextualHeader:
+ return Module::HK_PrivateTextual;
+ }
+}
+
+void ModuleMap::addHeader(Module *Mod, Module::Header Header,
ModuleHeaderRole Role) {
- if (Role == ExcludedHeader) {
- Mod->ExcludedHeaders.push_back(Header);
- } else {
- if (Role == PrivateHeader)
- Mod->PrivateHeaders.push_back(Header);
- else
- Mod->NormalHeaders.push_back(Header);
+ if (!(Role & TextualHeader)) {
bool isCompilingModuleHeader = Mod->getTopLevelModule() == CompilingModule;
- HeaderInfo.MarkFileModuleHeader(Header, Role, isCompilingModuleHeader);
+ HeaderInfo.MarkFileModuleHeader(Header.Entry, Role,
+ isCompilingModuleHeader);
}
- Headers[Header].push_back(KnownHeader(Mod, Role));
+ Headers[Header.Entry].push_back(KnownHeader(Mod, Role));
+
+ Mod->Headers[headerRoleToKind(Role)].push_back(std::move(Header));
+}
+
+void ModuleMap::excludeHeader(Module *Mod, Module::Header Header) {
+ // Add this as a known header so we won't implicitly add it to any
+ // umbrella directory module.
+ // FIXME: Should we only exclude it from umbrella modules within the
+ // specified module?
+ (void) Headers[Header.Entry];
+
+ Mod->Headers[Module::HK_Excluded].push_back(std::move(Header));
}
const FileEntry *
-ModuleMap::getContainingModuleMapFile(Module *Module) const {
+ModuleMap::getContainingModuleMapFile(const Module *Module) const {
if (Module->DefinitionLoc.isInvalid())
return nullptr;
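
The addHeader/excludeHeader rework above treats PrivateHeader and TextualHeader as combinable bits and maps each combination to a header-list kind. A minimal stand-alone mirror of that switch; the numeric values are chosen here for illustration:

#include <cassert>

enum HeaderRole {        // bitmask
  NormalHeader  = 0x0,
  PrivateHeader = 0x1,
  TextualHeader = 0x2,
};

enum class HeaderKind { Normal, Private, Textual, PrivateTextual };

static HeaderKind headerRoleToKind(int Role) {
  switch (Role) {
  case NormalHeader:                  return HeaderKind::Normal;
  case PrivateHeader:                 return HeaderKind::Private;
  case TextualHeader:                 return HeaderKind::Textual;
  case PrivateHeader | TextualHeader: return HeaderKind::PrivateTextual;
  }
  assert(false && "unknown header role");
  return HeaderKind::Normal;
}

int main() {
  assert(headerRoleToKind(PrivateHeader | TextualHeader) ==
         HeaderKind::PrivateTextual);
  return 0;
}
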
@@ -799,6 +814,19 @@ ModuleMap::getContainingModuleMapFile(Module *Module) const {
SourceMgr.getFileID(Module->DefinitionLoc));
}
+const FileEntry *ModuleMap::getModuleMapFileForUniquing(const Module *M) const {
+ if (M->IsInferred) {
+ assert(InferredModuleAllowedBy.count(M) && "missing inferred module map");
+ return InferredModuleAllowedBy.find(M)->second;
+ }
+ return getContainingModuleMapFile(M);
+}
+
+void ModuleMap::setInferredModuleAllowedBy(Module *M, const FileEntry *ModMap) {
+ assert(M->IsInferred && "module not inferred");
+ InferredModuleAllowedBy[M] = ModMap;
+}
+
void ModuleMap::dump() {
llvm::errs() << "Modules:";
for (llvm::StringMap<Module *>::iterator M = Modules.begin(),
@@ -927,6 +955,7 @@ namespace clang {
RequiresKeyword,
Star,
StringLiteral,
+ TextualKeyword,
LBrace,
RBrace,
LSquare,
@@ -955,21 +984,6 @@ namespace clang {
}
};
- /// \brief The set of attributes that can be attached to a module.
- struct Attributes {
- Attributes() : IsSystem(), IsExternC(), IsExhaustive() { }
-
- /// \brief Whether this is a system module.
- unsigned IsSystem : 1;
-
- /// \brief Whether this is an extern "C" module.
- unsigned IsExternC : 1;
-
- /// \brief Whether this is an exhaustive set of configuration macros.
- unsigned IsExhaustive : 1;
- };
-
-
class ModuleMapParser {
Lexer &L;
SourceManager &SourceMgr;
@@ -984,7 +998,8 @@ namespace clang {
/// \brief The current module map file.
const FileEntry *ModuleMapFile;
- /// \brief The directory that this module map resides in.
+ /// \brief The directory that file names in this module map file should
+ /// be resolved relative to.
const DirectoryEntry *Directory;
/// \brief The directory containing Clang-supplied headers.
@@ -1027,6 +1042,8 @@ namespace clang {
void parseConfigMacros();
void parseConflict();
void parseInferredModuleDecl(bool Framework, bool Explicit);
+
+ typedef ModuleMap::Attributes Attributes;
bool parseOptionalAttributes(Attributes &Attrs);
public:
@@ -1077,6 +1094,7 @@ retry:
.Case("module", MMToken::ModuleKeyword)
.Case("private", MMToken::PrivateKeyword)
.Case("requires", MMToken::RequiresKeyword)
+ .Case("textual", MMToken::TextualKeyword)
.Case("umbrella", MMToken::UmbrellaKeyword)
.Case("use", MMToken::UseKeyword)
.Default(MMToken::Identifier);
@@ -1328,8 +1346,11 @@ void ModuleMapParser::parseModuleDecl() {
// This module map defines a submodule. Go find the module of which it
// is a submodule.
ActiveModule = nullptr;
+ const Module *TopLevelModule = nullptr;
for (unsigned I = 0, N = Id.size() - 1; I != N; ++I) {
if (Module *Next = Map.lookupModuleQualified(Id[I].first, ActiveModule)) {
+ if (I == 0)
+ TopLevelModule = Next;
ActiveModule = Next;
continue;
}
@@ -1344,7 +1365,14 @@ void ModuleMapParser::parseModuleDecl() {
HadError = true;
return;
}
- }
+
+ if (ModuleMapFile != Map.getContainingModuleMapFile(TopLevelModule)) {
+ assert(ModuleMapFile != Map.getModuleMapFileForUniquing(TopLevelModule) &&
+ "submodule defined in same file as 'module *' that allowed its "
+ "top-level module");
+ Map.addAdditionalModuleMapFile(TopLevelModule, ModuleMapFile);
+ }
+ }
StringRef ModuleName = Id.back().first;
SourceLocation ModuleNameLoc = Id.back().second;
@@ -1390,19 +1418,15 @@ void ModuleMapParser::parseModuleDecl() {
return;
}
- // If this is a submodule, use the parent's module map, since we don't want
- // the private module map file.
- const FileEntry *ModuleMap = ActiveModule ? ActiveModule->ModuleMap
- : ModuleMapFile;
-
// Start defining this module.
- ActiveModule = Map.findOrCreateModule(ModuleName, ActiveModule, ModuleMap,
- Framework, Explicit).first;
+ ActiveModule = Map.findOrCreateModule(ModuleName, ActiveModule, Framework,
+ Explicit).first;
ActiveModule->DefinitionLoc = ModuleNameLoc;
if (Attrs.IsSystem || IsSystem)
ActiveModule->IsSystem = true;
if (Attrs.IsExternC)
ActiveModule->IsExternC = true;
+ ActiveModule->Directory = Directory;
bool Done = false;
do {
@@ -1439,6 +1463,10 @@ void ModuleMapParser::parseModuleDecl() {
parseRequiresDecl();
break;
+ case MMToken::TextualKeyword:
+ parseHeaderDecl(MMToken::TextualKeyword, consumeToken());
+ break;
+
case MMToken::UmbrellaKeyword: {
SourceLocation UmbrellaLoc = consumeToken();
if (Tok.is(MMToken::HeaderKeyword))
@@ -1447,31 +1475,17 @@ void ModuleMapParser::parseModuleDecl() {
parseUmbrellaDirDecl(UmbrellaLoc);
break;
}
-
- case MMToken::ExcludeKeyword: {
- SourceLocation ExcludeLoc = consumeToken();
- if (Tok.is(MMToken::HeaderKeyword)) {
- parseHeaderDecl(MMToken::ExcludeKeyword, ExcludeLoc);
- } else {
- Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
- << "exclude";
- }
+
+ case MMToken::ExcludeKeyword:
+ parseHeaderDecl(MMToken::ExcludeKeyword, consumeToken());
break;
- }
-
- case MMToken::PrivateKeyword: {
- SourceLocation PrivateLoc = consumeToken();
- if (Tok.is(MMToken::HeaderKeyword)) {
- parseHeaderDecl(MMToken::PrivateKeyword, PrivateLoc);
- } else {
- Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
- << "private";
- }
+
+ case MMToken::PrivateKeyword:
+ parseHeaderDecl(MMToken::PrivateKeyword, consumeToken());
break;
- }
-
+
case MMToken::HeaderKeyword:
- parseHeaderDecl(MMToken::HeaderKeyword, SourceLocation());
+ parseHeaderDecl(MMToken::HeaderKeyword, consumeToken());
break;
case MMToken::LinkKeyword:
@@ -1554,7 +1568,11 @@ void ModuleMapParser::parseExternModuleDecl() {
FileNameRef = ModuleMapFileName.str();
}
if (const FileEntry *File = SourceMgr.getFileManager().getFile(FileNameRef))
- Map.parseModuleMapFile(File, /*IsSystem=*/false);
+ Map.parseModuleMapFile(
+ File, /*IsSystem=*/false,
+ Map.HeaderInfo.getHeaderSearchOpts().ModuleMapFileHomeIsCwd
+ ? Directory
+ : File->getDir());
}
/// \brief Parse a requires declaration.
@@ -1626,12 +1644,37 @@ static void appendSubframeworkPaths(Module *Mod,
/// \brief Parse a header declaration.
///
/// header-declaration:
-/// 'umbrella'[opt] 'header' string-literal
-/// 'exclude'[opt] 'header' string-literal
+/// 'textual'[opt] 'header' string-literal
+/// 'private' 'textual'[opt] 'header' string-literal
+/// 'exclude' 'header' string-literal
+/// 'umbrella' 'header' string-literal
+///
+/// FIXME: Support 'private textual header'.
void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
SourceLocation LeadingLoc) {
- assert(Tok.is(MMToken::HeaderKeyword));
- consumeToken();
+ // We've already consumed the first token.
+ ModuleMap::ModuleHeaderRole Role = ModuleMap::NormalHeader;
+ if (LeadingToken == MMToken::PrivateKeyword) {
+ Role = ModuleMap::PrivateHeader;
+ // 'private' may optionally be followed by 'textual'.
+ if (Tok.is(MMToken::TextualKeyword)) {
+ LeadingToken = Tok.Kind;
+ consumeToken();
+ }
+ }
+ if (LeadingToken == MMToken::TextualKeyword)
+ Role = ModuleMap::ModuleHeaderRole(Role | ModuleMap::TextualHeader);
+
+ if (LeadingToken != MMToken::HeaderKeyword) {
+ if (!Tok.is(MMToken::HeaderKeyword)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
+ << (LeadingToken == MMToken::PrivateKeyword ? "private" :
+ LeadingToken == MMToken::ExcludeKeyword ? "exclude" :
+ LeadingToken == MMToken::TextualKeyword ? "textual" : "umbrella");
+ return;
+ }
+ consumeToken();
+ }
// Parse the header name.
if (!Tok.is(MMToken::StringLiteral)) {
@@ -1640,7 +1683,7 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
HadError = true;
return;
}
- Module::HeaderDirective Header;
+ Module::UnresolvedHeaderDirective Header;
Header.FileName = Tok.getString();
Header.FileNameLoc = consumeToken();
@@ -1655,33 +1698,39 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
// Look for this file.
const FileEntry *File = nullptr;
const FileEntry *BuiltinFile = nullptr;
- SmallString<128> PathName;
+ SmallString<128> RelativePathName;
if (llvm::sys::path::is_absolute(Header.FileName)) {
- PathName = Header.FileName;
- File = SourceMgr.getFileManager().getFile(PathName);
+ RelativePathName = Header.FileName;
+ File = SourceMgr.getFileManager().getFile(RelativePathName);
} else {
// Search for the header file within the search directory.
- PathName = Directory->getName();
- unsigned PathLength = PathName.size();
+ SmallString<128> FullPathName(Directory->getName());
+ unsigned FullPathLength = FullPathName.size();
if (ActiveModule->isPartOfFramework()) {
- appendSubframeworkPaths(ActiveModule, PathName);
+ appendSubframeworkPaths(ActiveModule, RelativePathName);
// Check whether this file is in the public headers.
- llvm::sys::path::append(PathName, "Headers", Header.FileName);
- File = SourceMgr.getFileManager().getFile(PathName);
+ llvm::sys::path::append(RelativePathName, "Headers", Header.FileName);
+ llvm::sys::path::append(FullPathName, RelativePathName.str());
+ File = SourceMgr.getFileManager().getFile(FullPathName);
if (!File) {
// Check whether this file is in the private headers.
- PathName.resize(PathLength);
- llvm::sys::path::append(PathName, "PrivateHeaders", Header.FileName);
- File = SourceMgr.getFileManager().getFile(PathName);
+ // FIXME: Should we retain the subframework paths here?
+ RelativePathName.clear();
+ FullPathName.resize(FullPathLength);
+ llvm::sys::path::append(RelativePathName, "PrivateHeaders",
+ Header.FileName);
+ llvm::sys::path::append(FullPathName, RelativePathName.str());
+ File = SourceMgr.getFileManager().getFile(FullPathName);
}
} else {
// Lookup for normal headers.
- llvm::sys::path::append(PathName, Header.FileName);
- File = SourceMgr.getFileManager().getFile(PathName);
-
+ llvm::sys::path::append(RelativePathName, Header.FileName);
+ llvm::sys::path::append(FullPathName, RelativePathName.str());
+ File = SourceMgr.getFileManager().getFile(FullPathName);
+
// If this is a system module with a top-level header, this header
// may have a counterpart (or replacement) in the set of headers
// supplied by Clang. Find that builtin header.
@@ -1691,18 +1740,19 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
SmallString<128> BuiltinPathName(BuiltinIncludeDir->getName());
llvm::sys::path::append(BuiltinPathName, Header.FileName);
BuiltinFile = SourceMgr.getFileManager().getFile(BuiltinPathName);
-
+
// If Clang supplies this header but the underlying system does not,
// just silently swap in our builtin version. Otherwise, we'll end
// up adding both (later).
if (!File && BuiltinFile) {
File = BuiltinFile;
+ RelativePathName = BuiltinPathName;
BuiltinFile = nullptr;
}
}
}
}
-
+
// FIXME: We shouldn't be eagerly stat'ing every file named in a module map.
// Come up with a lazy way to do this.
if (File) {
@@ -1716,21 +1766,24 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
// Record this umbrella header.
Map.setUmbrellaHeader(ActiveModule, File);
}
+ } else if (LeadingToken == MMToken::ExcludeKeyword) {
+ Module::Header H = {RelativePathName.str(), File};
+ Map.excludeHeader(ActiveModule, H);
} else {
+ // If there is a builtin counterpart to this file, add it now, before
+ // the "real" header, so we build the built-in one first when building
+ // the module.
+ if (BuiltinFile) {
+ // FIXME: Taking the name from the FileEntry is unstable and can give
+ // different results depending on how we've previously named that file
+ // in this build.
+ Module::Header H = { BuiltinFile->getName(), BuiltinFile };
+ Map.addHeader(ActiveModule, H, Role);
+ }
+
// Record this header.
- ModuleMap::ModuleHeaderRole Role = ModuleMap::NormalHeader;
- if (LeadingToken == MMToken::ExcludeKeyword)
- Role = ModuleMap::ExcludedHeader;
- else if (LeadingToken == MMToken::PrivateKeyword)
- Role = ModuleMap::PrivateHeader;
- else
- assert(LeadingToken == MMToken::HeaderKeyword);
-
- Map.addHeader(ActiveModule, File, Role);
-
- // If there is a builtin counterpart to this file, add it now.
- if (BuiltinFile)
- Map.addHeader(ActiveModule, BuiltinFile, Role);
+ Module::Header H = { RelativePathName.str(), File };
+ Map.addHeader(ActiveModule, H, Role);
}
} else if (LeadingToken != MMToken::ExcludeKeyword) {
// Ignore excluded header files. They're optional anyway.
@@ -1813,6 +1866,7 @@ void ModuleMapParser::parseExportDecl() {
ModuleId ParsedModuleId;
bool Wildcard = false;
do {
+ // FIXME: Support string-literal module names here.
if (Tok.is(MMToken::Identifier)) {
ParsedModuleId.push_back(std::make_pair(Tok.getString(),
Tok.getLocation()));
@@ -1910,6 +1964,7 @@ void ModuleMapParser::parseConfigMacros() {
}
// If we don't have an identifier, we're done.
+ // FIXME: Support macros with the same name as a keyword here.
if (!Tok.is(MMToken::Identifier))
return;
@@ -1926,6 +1981,7 @@ void ModuleMapParser::parseConfigMacros() {
consumeToken();
// We expect to see a macro name here.
+ // FIXME: Support macros with the same name as a keyword here.
if (!Tok.is(MMToken::Identifier)) {
Diags.Report(Tok.getLocation(), diag::err_mmap_expected_config_macro);
break;
@@ -2060,7 +2116,7 @@ void ModuleMapParser::parseInferredModuleDecl(bool Framework, bool Explicit) {
} else {
// We'll be inferring framework modules for this directory.
Map.InferredDirectories[Directory].InferModules = true;
- Map.InferredDirectories[Directory].InferSystemModules = Attrs.IsSystem;
+ Map.InferredDirectories[Directory].Attrs = Attrs;
Map.InferredDirectories[Directory].ModuleMapFile = ModuleMapFile;
// FIXME: Handle the 'framework' keyword.
}
@@ -2091,6 +2147,7 @@ void ModuleMapParser::parseInferredModuleDecl(bool Framework, bool Explicit) {
}
consumeToken();
+ // FIXME: Support string-literal module names here.
if (!Tok.is(MMToken::Identifier)) {
Diags.Report(Tok.getLocation(), diag::err_mmap_missing_exclude_name);
break;
@@ -2246,6 +2303,7 @@ bool ModuleMapParser::parseModuleMapFile() {
case MMToken::RequiresKeyword:
case MMToken::Star:
case MMToken::StringLiteral:
+ case MMToken::TextualKeyword:
case MMToken::UmbrellaKeyword:
case MMToken::UseKeyword:
Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module);
@@ -2256,7 +2314,8 @@ bool ModuleMapParser::parseModuleMapFile() {
} while (true);
}
-bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem) {
+bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem,
+ const DirectoryEntry *Dir) {
llvm::DenseMap<const FileEntry *, bool>::iterator Known
= ParsedModuleMap.find(File);
if (Known != ParsedModuleMap.end())
@@ -2269,17 +2328,6 @@ bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem) {
if (!Buffer)
return ParsedModuleMap[File] = true;
- // Find the directory for the module. For frameworks, that may require going
- // up from the 'Modules' directory.
- const DirectoryEntry *Dir = File->getDir();
- StringRef DirName(Dir->getName());
- if (llvm::sys::path::filename(DirName) == "Modules") {
- DirName = llvm::sys::path::parent_path(DirName);
- if (DirName.endswith(".framework"))
- Dir = SourceMgr.getFileManager().getDirectory(DirName);
- assert(Dir && "parent must exist");
- }
-
// Parse this module map file.
Lexer L(ID, SourceMgr.getBuffer(ID), SourceMgr, MMapLangOpts);
ModuleMapParser Parser(L, SourceMgr, Target, Diags, *this, File, Dir,
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index 1741c302f338..bf0ce72f6668 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -25,7 +25,7 @@
#include "clang/Lex/Pragma.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
@@ -34,23 +34,10 @@ using namespace clang;
//===----------------------------------------------------------------------===//
MacroInfo *Preprocessor::AllocateMacroInfo() {
- MacroInfoChain *MIChain;
-
- if (MICache) {
- MIChain = MICache;
- MICache = MICache->Next;
- }
- else {
- MIChain = BP.Allocate<MacroInfoChain>();
- }
-
+ MacroInfoChain *MIChain = BP.Allocate<MacroInfoChain>();
MIChain->Next = MIChainHead;
- MIChain->Prev = nullptr;
- if (MIChainHead)
- MIChainHead->Prev = MIChain;
MIChainHead = MIChain;
-
- return &(MIChain->MI);
+ return &MIChain->MI;
}
MacroInfo *Preprocessor::AllocateMacroInfo(SourceLocation L) {
@@ -77,45 +64,30 @@ MacroInfo *Preprocessor::AllocateDeserializedMacroInfo(SourceLocation L,
DefMacroDirective *
Preprocessor::AllocateDefMacroDirective(MacroInfo *MI, SourceLocation Loc,
- bool isImported) {
- DefMacroDirective *MD = BP.Allocate<DefMacroDirective>();
- new (MD) DefMacroDirective(MI, Loc, isImported);
- return MD;
+ unsigned ImportedFromModuleID,
+ ArrayRef<unsigned> Overrides) {
+ unsigned NumExtra = (ImportedFromModuleID ? 1 : 0) + Overrides.size();
+ return new (BP.Allocate(sizeof(DefMacroDirective) +
+ sizeof(unsigned) * NumExtra,
+ llvm::alignOf<DefMacroDirective>()))
+ DefMacroDirective(MI, Loc, ImportedFromModuleID, Overrides);
}
UndefMacroDirective *
-Preprocessor::AllocateUndefMacroDirective(SourceLocation UndefLoc) {
- UndefMacroDirective *MD = BP.Allocate<UndefMacroDirective>();
- new (MD) UndefMacroDirective(UndefLoc);
- return MD;
+Preprocessor::AllocateUndefMacroDirective(SourceLocation UndefLoc,
+ unsigned ImportedFromModuleID,
+ ArrayRef<unsigned> Overrides) {
+ unsigned NumExtra = (ImportedFromModuleID ? 1 : 0) + Overrides.size();
+ return new (BP.Allocate(sizeof(UndefMacroDirective) +
+ sizeof(unsigned) * NumExtra,
+ llvm::alignOf<UndefMacroDirective>()))
+ UndefMacroDirective(UndefLoc, ImportedFromModuleID, Overrides);
}
VisibilityMacroDirective *
Preprocessor::AllocateVisibilityMacroDirective(SourceLocation Loc,
bool isPublic) {
- VisibilityMacroDirective *MD = BP.Allocate<VisibilityMacroDirective>();
- new (MD) VisibilityMacroDirective(Loc, isPublic);
- return MD;
-}
-
-/// \brief Release the specified MacroInfo to be reused for allocating
-/// new MacroInfo objects.
-void Preprocessor::ReleaseMacroInfo(MacroInfo *MI) {
- MacroInfoChain *MIChain = (MacroInfoChain *)MI;
- if (MacroInfoChain *Prev = MIChain->Prev) {
- MacroInfoChain *Next = MIChain->Next;
- Prev->Next = Next;
- if (Next)
- Next->Prev = Prev;
- } else {
- assert(MIChainHead == MIChain);
- MIChainHead = MIChain->Next;
- MIChainHead->Prev = nullptr;
- }
- MIChain->Next = MICache;
- MICache = MIChain;
-
- MI->Destroy();
+ return new (BP) VisibilityMacroDirective(Loc, isPublic);
}
/// \brief Read and discard all tokens remaining on the current line until
@@ -128,7 +100,56 @@ void Preprocessor::DiscardUntilEndOfDirective() {
} while (Tmp.isNot(tok::eod));
}
-bool Preprocessor::CheckMacroName(Token &MacroNameTok, char isDefineUndef) {
+/// \brief Enumerates possible cases of #define/#undef of a reserved identifier.
+enum MacroDiag {
+  MD_NoWarn,        //> Not a reserved identifier
+  MD_KeywordDef,    //> Macro hides a keyword, enabled by default
+  MD_ReservedMacro  //> #define or #undef of a reserved id, disabled by default
+};
+
+/// \brief Checks if the specified identifier is reserved in the specified
+/// language.
+/// This function does not check if the identifier is a keyword.
+static bool isReservedId(StringRef Text, const LangOptions &Lang) {
+ // C++ [macro.names], C11 7.1.3:
+ // All identifiers that begin with an underscore and either an uppercase
+ // letter or another underscore are always reserved for any use.
+ if (Text.size() >= 2 && Text[0] == '_' &&
+ (isUppercase(Text[1]) || Text[1] == '_'))
+ return true;
+ // C++ [global.names]
+ // Each name that contains a double underscore ... is reserved to the
+ // implementation for any use.
+ if (Lang.CPlusPlus) {
+ if (Text.find("__") != StringRef::npos)
+ return true;
+ }
+ return false;
+}
+
+static MacroDiag shouldWarnOnMacroDef(Preprocessor &PP, IdentifierInfo *II) {
+ const LangOptions &Lang = PP.getLangOpts();
+ StringRef Text = II->getName();
+ if (isReservedId(Text, Lang))
+ return MD_ReservedMacro;
+ if (II->isKeyword(Lang))
+ return MD_KeywordDef;
+ if (Lang.CPlusPlus11 && (Text.equals("override") || Text.equals("final")))
+ return MD_KeywordDef;
+ return MD_NoWarn;
+}
+
+static MacroDiag shouldWarnOnMacroUndef(Preprocessor &PP, IdentifierInfo *II) {
+ const LangOptions &Lang = PP.getLangOpts();
+ StringRef Text = II->getName();
+ // Do not warn on keyword undef. It is generally harmless and widely used.
+ if (isReservedId(Text, Lang))
+ return MD_ReservedMacro;
+ return MD_NoWarn;
+}
+
+bool Preprocessor::CheckMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
+ bool *ShadowFlag) {
// Missing macro name?
if (MacroNameTok.is(tok::eod))
return Diag(MacroNameTok, diag::err_pp_missing_macro_name);
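
The new isReservedId/shouldWarnOnMacroDef helpers classify macro names before #define/#undef warnings are issued. Below is a stand-alone mirror of the reserved-identifier rule with a small driver; the LangOpts struct is an illustrative stand-in for clang's options object:

#include <cctype>
#include <iostream>
#include <string>

struct LangOpts { bool CPlusPlus = true; };

static bool isReservedId(const std::string &Text, const LangOpts &Lang) {
  // _X or __x: always reserved for any use.
  if (Text.size() >= 2 && Text[0] == '_' &&
      (std::isupper(static_cast<unsigned char>(Text[1])) || Text[1] == '_'))
    return true;
  // In C++, a double underscore anywhere in the name is reserved.
  if (Lang.CPlusPlus && Text.find("__") != std::string::npos)
    return true;
  return false;
}

int main() {
  LangOpts Cxx;
  for (const char *Name : {"_Foo", "__bar", "foo__bar", "_foo", "FOO"})
    std::cout << Name << " -> "
              << (isReservedId(Name, Cxx) ? "reserved" : "ok") << "\n";
  return 0;
}
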
@@ -156,18 +177,42 @@ bool Preprocessor::CheckMacroName(Token &MacroNameTok, char isDefineUndef) {
MacroNameTok.setIdentifierInfo(II);
}
- if (isDefineUndef && II->getPPKeywordID() == tok::pp_defined) {
+ if ((isDefineUndef != MU_Other) && II->getPPKeywordID() == tok::pp_defined) {
// Error if defining "defined": C99 6.10.8/4, C++ [cpp.predefined]p4.
return Diag(MacroNameTok, diag::err_defined_macro_name);
}
- if (isDefineUndef == 2 && II->hasMacroDefinition() &&
+ if (isDefineUndef == MU_Undef && II->hasMacroDefinition() &&
getMacroInfo(II)->isBuiltinMacro()) {
// Warn if undefining "__LINE__" and other builtins, per C99 6.10.8/4
// and C++ [cpp.predefined]p4], but allow it as an extension.
Diag(MacroNameTok, diag::ext_pp_undef_builtin_macro);
}
+ // If defining/undefining reserved identifier or a keyword, we need to issue
+ // a warning.
+ SourceLocation MacroNameLoc = MacroNameTok.getLocation();
+ if (ShadowFlag)
+ *ShadowFlag = false;
+ if (!SourceMgr.isInSystemHeader(MacroNameLoc) &&
+ (strcmp(SourceMgr.getBufferName(MacroNameLoc), "<built-in>") != 0)) {
+ MacroDiag D = MD_NoWarn;
+ if (isDefineUndef == MU_Define) {
+ D = shouldWarnOnMacroDef(*this, II);
+ }
+ else if (isDefineUndef == MU_Undef)
+ D = shouldWarnOnMacroUndef(*this, II);
+ if (D == MD_KeywordDef) {
+      // We do not want to warn on some patterns widely used in configuration
+      // scripts. This requires analyzing the next tokens, so do not issue
+      // warnings now; only inform the caller.
+ if (ShadowFlag)
+ *ShadowFlag = true;
+ }
+ if (D == MD_ReservedMacro)
+ Diag(MacroNameTok, diag::warn_pp_macro_is_reserved_id);
+ }
+
// Okay, we got a good identifier.
return false;
}
@@ -175,22 +220,25 @@ bool Preprocessor::CheckMacroName(Token &MacroNameTok, char isDefineUndef) {
/// \brief Lex and validate a macro name, which occurs after a
/// \#define or \#undef.
///
-/// This sets the token kind to eod and discards the rest
-/// of the macro line if the macro name is invalid. \p isDefineUndef is 1 if
-/// this is due to a a \#define, 2 if \#undef directive, 0 if it is something
-/// else (e.g. \#ifdef).
-void Preprocessor::ReadMacroName(Token &MacroNameTok, char isDefineUndef) {
+/// This sets the token kind to eod and discards the rest of the macro line if
+/// the macro name is invalid.
+///
+/// \param MacroNameTok Token that is expected to be a macro name.
+/// \param isDefineUndef Context in which macro is used.
+/// \param ShadowFlag Points to a flag that is set if macro shadows a keyword.
+void Preprocessor::ReadMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
+ bool *ShadowFlag) {
// Read the token, don't allow macro expansion on it.
LexUnexpandedToken(MacroNameTok);
if (MacroNameTok.is(tok::code_completion)) {
if (CodeComplete)
- CodeComplete->CodeCompleteMacroName(isDefineUndef == 1);
+ CodeComplete->CodeCompleteMacroName(isDefineUndef == MU_Define);
setCodeCompletionReached();
LexUnexpandedToken(MacroNameTok);
}
- if (!CheckMacroName(MacroNameTok, isDefineUndef))
+ if (!CheckMacroName(MacroNameTok, isDefineUndef, ShadowFlag))
return;
// Invalid macro name, read and discard the rest of the line and set the
@@ -562,6 +610,7 @@ const FileEntry *Preprocessor::LookupFile(
StringRef Filename,
bool isAngled,
const DirectoryLookup *FromDir,
+ const FileEntry *FromFile,
const DirectoryLookup *&CurDir,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
@@ -569,22 +618,33 @@ const FileEntry *Preprocessor::LookupFile(
bool SkipCache) {
// If the header lookup mechanism may be relative to the current inclusion
// stack, record the parent #includes.
- SmallVector<const FileEntry *, 16> Includers;
- if (!FromDir) {
+ SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 16>
+ Includers;
+ if (!FromDir && !FromFile) {
FileID FID = getCurrentFileLexer()->getFileID();
const FileEntry *FileEnt = SourceMgr.getFileEntryForID(FID);
// If there is no file entry associated with this file, it must be the
- // predefines buffer. Any other file is not lexed with a normal lexer, so
- // it won't be scanned for preprocessor directives. If we have the
- // predefines buffer, resolve #include references (which come from the
- // -include command line argument) as if they came from the main file, this
- // affects file lookup etc.
- if (!FileEnt)
- FileEnt = SourceMgr.getFileEntryForID(SourceMgr.getMainFileID());
-
- if (FileEnt)
- Includers.push_back(FileEnt);
+ // predefines buffer or the module includes buffer. Any other file is not
+ // lexed with a normal lexer, so it won't be scanned for preprocessor
+ // directives.
+ //
+ // If we have the predefines buffer, resolve #include references (which come
+ // from the -include command line argument) from the current working
+ // directory instead of relative to the main file.
+ //
+ // If we have the module includes buffer, resolve #include references (which
+ // come from header declarations in the module map) relative to the module
+ // map file.
+ if (!FileEnt) {
+ if (FID == SourceMgr.getMainFileID() && MainFileDir)
+ Includers.push_back(std::make_pair(nullptr, MainFileDir));
+ else if ((FileEnt =
+ SourceMgr.getFileEntryForID(SourceMgr.getMainFileID())))
+ Includers.push_back(std::make_pair(FileEnt, FileMgr.getDirectory(".")));
+ } else {
+ Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
+ }
// MSVC searches the current include stack from top to bottom for
// headers included by quoted include directives.
@@ -595,13 +655,35 @@ const FileEntry *Preprocessor::LookupFile(
if (IsFileLexer(ISEntry))
if ((FileEnt = SourceMgr.getFileEntryForID(
ISEntry.ThePPLexer->getFileID())))
- Includers.push_back(FileEnt);
+ Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
}
}
}
- // Do a standard file entry lookup.
CurDir = CurDirLookup;
+
+ if (FromFile) {
+ // We're supposed to start looking from after a particular file. Search
+ // the include path until we find that file or run out of files.
+ const DirectoryLookup *TmpCurDir = CurDir;
+ const DirectoryLookup *TmpFromDir = nullptr;
+ while (const FileEntry *FE = HeaderInfo.LookupFile(
+ Filename, FilenameLoc, isAngled, TmpFromDir, TmpCurDir,
+ Includers, SearchPath, RelativePath, SuggestedModule,
+ SkipCache)) {
+ // Keep looking as if this file did a #include_next.
+ TmpFromDir = TmpCurDir;
+ ++TmpFromDir;
+ if (FE == FromFile) {
+ // Found it.
+ FromDir = TmpFromDir;
+ CurDir = TmpCurDir;
+ break;
+ }
+ }
+ }
+
+ // Do a standard file entry lookup.
const FileEntry *FE = HeaderInfo.LookupFile(
Filename, FilenameLoc, isAngled, FromDir, CurDir, Includers, SearchPath,
RelativePath, SuggestedModule, SkipCache);
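
The FromFile branch above replays the search as if the named file had issued #include_next, then resumes a normal lookup from the directory after the match. A minimal standalone sketch of that idea, using hypothetical Dir/lookup types rather than the real HeaderSearch API:

    #include <cstddef>
    #include <string>
    #include <vector>

    struct Dir { std::string Path; };

    // Return the index of the search-path slot *after* the directory that holds
    // FromFileDir, so a subsequent lookup behaves like #include_next from there.
    static std::size_t startAfter(const std::vector<Dir> &SearchPath,
                                  const std::string &FromFileDir) {
      for (std::size_t I = 0; I != SearchPath.size(); ++I)
        if (SearchPath[I].Path == FromFileDir)
          return I + 1;            // resume one slot later
      return 0;                    // not found: fall back to a full search
    }
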
@@ -716,7 +798,8 @@ void Preprocessor::HandleDirective(Token &Result) {
case tok::pp_import:
case tok::pp_include_next:
case tok::pp___include_macros:
- Diag(Result, diag::err_embedded_include) << II->getName();
+ case tok::pp_pragma:
+ Diag(Result, diag::err_embedded_directive) << II->getName();
DiscardUntilEndOfDirective();
return;
default:
@@ -1196,7 +1279,7 @@ void Preprocessor::HandleIdentSCCSDirective(Token &Tok) {
/// \brief Handle a #public directive.
void Preprocessor::HandleMacroPublicDirective(Token &Tok) {
Token MacroNameTok;
- ReadMacroName(MacroNameTok, 2);
+ ReadMacroName(MacroNameTok, MU_Undef);
// Error reading macro name? If so, diagnostic already issued.
if (MacroNameTok.is(tok::eod))
@@ -1223,7 +1306,7 @@ void Preprocessor::HandleMacroPublicDirective(Token &Tok) {
/// \brief Handle a #private directive.
void Preprocessor::HandleMacroPrivateDirective(Token &Tok) {
Token MacroNameTok;
- ReadMacroName(MacroNameTok, 2);
+ ReadMacroName(MacroNameTok, MU_Undef);
// Error reading macro name? If so, diagnostic already issued.
if (MacroNameTok.is(tok::eod))
@@ -1378,6 +1461,7 @@ static void EnterAnnotationToken(Preprocessor &PP,
void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
Token &IncludeTok,
const DirectoryLookup *LookupFrom,
+ const FileEntry *LookupFromFile,
bool isImport) {
Token FilenameTok;
@@ -1469,12 +1553,14 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
SmallString<128> NormalizedPath;
if (LangOpts.MSVCCompat) {
NormalizedPath = Filename.str();
- llvm::sys::fs::normalize_separators(NormalizedPath);
+#ifndef LLVM_ON_WIN32
+ llvm::sys::path::native(NormalizedPath);
+#endif
}
const FileEntry *File = LookupFile(
FilenameLoc, LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename,
- isAngled, LookupFrom, CurDir, Callbacks ? &SearchPath : nullptr,
- Callbacks ? &RelativePath : nullptr,
+ isAngled, LookupFrom, LookupFromFile, CurDir,
+ Callbacks ? &SearchPath : nullptr, Callbacks ? &RelativePath : nullptr,
HeaderInfo.getHeaderSearchOpts().ModuleMaps ? &SuggestedModule : nullptr);
if (Callbacks) {
@@ -1488,14 +1574,13 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
HeaderInfo.AddSearchPath(DL, isAngled);
// Try the lookup again, skipping the cache.
- File = LookupFile(FilenameLoc,
- LangOpts.MSVCCompat ? NormalizedPath.c_str()
- : Filename,
- isAngled, LookupFrom, CurDir, nullptr, nullptr,
- HeaderInfo.getHeaderSearchOpts().ModuleMaps
- ? &SuggestedModule
- : nullptr,
- /*SkipCache*/ true);
+ File = LookupFile(
+ FilenameLoc,
+ LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename, isAngled,
+ LookupFrom, LookupFromFile, CurDir, nullptr, nullptr,
+ HeaderInfo.getHeaderSearchOpts().ModuleMaps ? &SuggestedModule
+ : nullptr,
+ /*SkipCache*/ true);
}
}
}
@@ -1517,8 +1602,10 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// provide the user with a possible fixit.
if (isAngled) {
File = LookupFile(
- FilenameLoc, LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename,
- false, LookupFrom, CurDir, Callbacks ? &SearchPath : nullptr,
+ FilenameLoc,
+ LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename, false,
+ LookupFrom, LookupFromFile, CurDir,
+ Callbacks ? &SearchPath : nullptr,
Callbacks ? &RelativePath : nullptr,
HeaderInfo.getHeaderSearchOpts().ModuleMaps ? &SuggestedModule
: nullptr);
@@ -1539,7 +1626,9 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// If we are supposed to import a module rather than including the header,
// do so now.
- if (SuggestedModule && getLangOpts().Modules) {
+ if (SuggestedModule && getLangOpts().Modules &&
+ SuggestedModule.getModule()->getTopLevelModuleName() !=
+ getLangOpts().ImplementationOfModule) {
// Compute the module access path corresponding to this module.
// FIXME: Should we have a second loadModule() overload to avoid this
// extra lookup step?
@@ -1713,9 +1802,16 @@ void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
// the current found directory. If we can't do this, issue a
// diagnostic.
const DirectoryLookup *Lookup = CurDirLookup;
+ const FileEntry *LookupFromFile = nullptr;
if (isInPrimaryFile()) {
Lookup = nullptr;
Diag(IncludeNextTok, diag::pp_include_next_in_primary);
+ } else if (CurSubmodule) {
+ // Start looking up in the directory *after* the one in which the current
+ // file would be found, if any.
+ assert(CurPPLexer && "#include_next directive in macro?");
+ LookupFromFile = CurPPLexer->getFileEntry();
+ Lookup = nullptr;
} else if (!Lookup) {
Diag(IncludeNextTok, diag::pp_include_next_absolute_path);
} else {
@@ -1723,7 +1819,8 @@ void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
++Lookup;
}
- return HandleIncludeDirective(HashLoc, IncludeNextTok, Lookup);
+ return HandleIncludeDirective(HashLoc, IncludeNextTok, Lookup,
+ LookupFromFile);
}
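
For context, the usual consumer of this lookup is a wrapper header that shadows a system header of the same name; an illustrative example, not taken from the patch:

    /* my_overrides/stdio.h */
    #ifndef WRAPPED_STDIO_H
    #define WRAPPED_STDIO_H
    #include_next <stdio.h>   /* the search resumes after this header's directory */
    /* local additions follow here */
    #endif
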
/// HandleMicrosoftImportDirective - Implements \#import for Microsoft Mode
@@ -1749,7 +1846,7 @@ void Preprocessor::HandleImportDirective(SourceLocation HashLoc,
return HandleMicrosoftImportDirective(ImportTok);
Diag(ImportTok, diag::ext_pp_import_directive);
}
- return HandleIncludeDirective(HashLoc, ImportTok, nullptr, true);
+ return HandleIncludeDirective(HashLoc, ImportTok, nullptr, nullptr, true);
}
/// HandleIncludeMacrosDirective - The -imacros command line option turns into a
@@ -1770,7 +1867,7 @@ void Preprocessor::HandleIncludeMacrosDirective(SourceLocation HashLoc,
// Treat this as a normal #include for checking purposes. If this is
// successful, it will push a new lexer onto the include stack.
- HandleIncludeDirective(HashLoc, IncludeMacrosTok, nullptr, false);
+ HandleIncludeDirective(HashLoc, IncludeMacrosTok);
Token TmpTok;
do {
@@ -1878,6 +1975,52 @@ bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI, Token &Tok) {
}
}
+static bool isConfigurationPattern(Token &MacroName, MacroInfo *MI,
+ const LangOptions &LOptions) {
+ if (MI->getNumTokens() == 1) {
+ const Token &Value = MI->getReplacementToken(0);
+
+ // A macro that maps to itself, like '#define inline inline', is a valid pattern.
+ if (MacroName.getKind() == Value.getKind())
+ return true;
+
+ // Macro that maps a keyword to the same keyword decorated with leading/
+ // trailing underscores is a valid pattern:
+ // #define inline __inline
+ // #define inline __inline__
+ // #define inline _inline (in MS compatibility mode)
+ StringRef MacroText = MacroName.getIdentifierInfo()->getName();
+ if (IdentifierInfo *II = Value.getIdentifierInfo()) {
+ if (!II->isKeyword(LOptions))
+ return false;
+ StringRef ValueText = II->getName();
+ StringRef TrimmedValue = ValueText;
+ if (!ValueText.startswith("__")) {
+ if (ValueText.startswith("_"))
+ TrimmedValue = TrimmedValue.drop_front(1);
+ else
+ return false;
+ } else {
+ TrimmedValue = TrimmedValue.drop_front(2);
+ if (TrimmedValue.endswith("__"))
+ TrimmedValue = TrimmedValue.drop_back(2);
+ }
+ return TrimmedValue.equals(MacroText);
+ } else {
+ return false;
+ }
+ }
+
+ // #define inline
+ if ((MacroName.is(tok::kw_extern) || MacroName.is(tok::kw_inline) ||
+ MacroName.is(tok::kw_static) || MacroName.is(tok::kw_const)) &&
+ MI->getNumTokens() == 0) {
+ return true;
+ }
+
+ return false;
+}
+
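
Concretely, the patterns isConfigurationPattern accepts or rejects look like this; which lines actually warn also depends on the language options in effect:

    #define inline inline       // identity mapping: accepted
    #define inline __inline     // '__' prefix trimmed back to 'inline': accepted
    #define inline __inline__   // '__' prefix and suffix trimmed: accepted
    #define inline _inline      // single '_' prefix: accepted when _inline is itself
                                // a keyword (Microsoft compatibility mode)
    #define inline              // empty body, allowed for extern/inline/static/const
    #define inline int          // keyword mapped to an unrelated token: warned
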
/// HandleDefineDirective - Implements \#define. This consumes the entire macro
/// line then lets the caller lex the next real token.
void Preprocessor::HandleDefineDirective(Token &DefineTok,
@@ -1885,7 +2028,8 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok,
++NumDefined;
Token MacroNameTok;
- ReadMacroName(MacroNameTok, 1);
+ bool MacroShadowsKeyword;
+ ReadMacroName(MacroNameTok, MU_Define, &MacroShadowsKeyword);
// Error reading macro name? If so, diagnostic already issued.
if (MacroNameTok.is(tok::eod))
@@ -1921,8 +2065,6 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok,
// This is a function-like macro definition. Read the argument list.
MI->setIsFunctionLike();
if (ReadMacroDefinitionArgList(MI, LastTok)) {
- // Forget about MI.
- ReleaseMacroInfo(MI);
// Throw away the rest of the line.
if (CurPPLexer->ParsingPreprocessorDirective)
DiscardUntilEndOfDirective();
@@ -2047,7 +2189,6 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok,
continue;
} else {
Diag(Tok, diag::err_pp_stringize_not_parameter);
- ReleaseMacroInfo(MI);
// Disable __VA_ARGS__ again.
Ident__VA_ARGS__->setIsPoisoned(true);
@@ -2065,6 +2206,10 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok,
}
}
+ if (MacroShadowsKeyword &&
+ !isConfigurationPattern(MacroNameTok, MI, getLangOpts())) {
+ Diag(MacroNameTok, diag::warn_pp_macro_hides_keyword);
+ }
// Disable __VA_ARGS__ again.
Ident__VA_ARGS__->setIsPoisoned(true);
@@ -2075,12 +2220,10 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok,
if (NumTokens != 0) {
if (MI->getReplacementToken(0).is(tok::hashhash)) {
Diag(MI->getReplacementToken(0), diag::err_paste_at_start);
- ReleaseMacroInfo(MI);
return;
}
if (MI->getReplacementToken(NumTokens-1).is(tok::hashhash)) {
Diag(MI->getReplacementToken(NumTokens-1), diag::err_paste_at_end);
- ReleaseMacroInfo(MI);
return;
}
}
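
The two checks above reject a paste operator with a missing operand, for example:

    #define BAD_START   ## x     // error: '##' cannot appear at start of macro expansion
    #define BAD_END(a)  a ##     // error: '##' cannot appear at end of macro expansion
    #define OK(a, b)    a ## b   // fine: both operands present
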
@@ -2138,7 +2281,7 @@ void Preprocessor::HandleUndefDirective(Token &UndefTok) {
++NumUndefined;
Token MacroNameTok;
- ReadMacroName(MacroNameTok, 2);
+ ReadMacroName(MacroNameTok, MU_Undef);
// Error reading macro name? If so, diagnostic already issued.
if (MacroNameTok.is(tok::eod))
diff --git a/lib/Lex/PPExpressions.cpp b/lib/Lex/PPExpressions.cpp
index 2260bf98f7a5..9cf72cf8f8fb 100644
--- a/lib/Lex/PPExpressions.cpp
+++ b/lib/Lex/PPExpressions.cpp
@@ -103,7 +103,7 @@ static bool EvaluateDefined(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
}
// If we don't have a pp-identifier now, this is an error.
- if (PP.CheckMacroName(PeekTok, 0))
+ if (PP.CheckMacroName(PeekTok, MU_Other))
return true;
// Otherwise, we got an identifier, is it defined to something?
@@ -244,7 +244,9 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
// Parse the integer literal into Result.
if (Literal.GetIntegerValue(Result.Val)) {
// Overflow parsing integer literal.
- if (ValueLive) PP.Diag(PeekTok, diag::err_integer_too_large);
+ if (ValueLive)
+ PP.Diag(PeekTok, diag::err_integer_literal_too_large)
+ << /* Unsigned */ 1;
Result.Val.setIsUnsigned(true);
} else {
// Set the signedness of the result to match whether there was a U suffix
@@ -259,7 +261,7 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
// Octal, hexadecimal, and binary literals are implicitly unsigned if
// the value does not fit into a signed integer type.
if (ValueLive && Literal.getRadix() == 10)
- PP.Diag(PeekTok, diag::ext_integer_too_large_for_signed);
+ PP.Diag(PeekTok, diag::ext_integer_literal_too_large_for_signed);
Result.Val.setIsUnsigned(true);
}
}
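
Roughly, the two diagnostics touched above behave like this in #if arithmetic, assuming a 64-bit intmax_t:

    // decimal literal that only fits as unsigned: extension warning, then treated as unsigned
    #if 18446744073709551615 > 0
    #endif
    // hex/octal/binary literals become unsigned silently, with no diagnostic
    #if 0xFFFFFFFFFFFFFFFF > 0
    #endif
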
@@ -271,6 +273,7 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
}
case tok::char_constant: // 'x'
case tok::wide_char_constant: // L'x'
+ case tok::utf8_char_constant: // u8'x'
case tok::utf16_char_constant: // u'x'
case tok::utf32_char_constant: { // U'x'
// Complain about, and drop, any ud-suffix.
@@ -597,15 +600,10 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
break;
case tok::lessless: {
// Determine whether overflow is about to happen.
- unsigned ShAmt = static_cast<unsigned>(RHS.Val.getLimitedValue());
- if (LHS.isUnsigned()) {
- Overflow = ShAmt >= LHS.Val.getBitWidth();
- if (Overflow)
- ShAmt = LHS.Val.getBitWidth()-1;
- Res = LHS.Val << ShAmt;
- } else {
- Res = llvm::APSInt(LHS.Val.sshl_ov(ShAmt, Overflow), false);
- }
+ if (LHS.isUnsigned())
+ Res = LHS.Val.ushl_ov(RHS.Val, Overflow);
+ else
+ Res = llvm::APSInt(LHS.Val.sshl_ov(RHS.Val, Overflow), false);
break;
}
case tok::greatergreater: {
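
The rewrite above hands overflow detection to APInt's checked shifts (ushl_ov/sshl_ov). A standalone approximation of the unsigned case in plain C++, just to make the semantics concrete:

    #include <cstdint>

    // Shift a left by b in 64-bit unsigned arithmetic; report overflow when the
    // shift amount is out of range or set bits are shifted out of the value.
    static std::uint64_t ushl_ov64(std::uint64_t a, std::uint64_t b, bool &Overflow) {
      if (b >= 64) { Overflow = true; return 0; }
      std::uint64_t r = a << b;
      Overflow = (r >> b) != a;
      return r;
    }
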
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index 22ee971f4889..fb5e2b05808c 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -160,17 +160,17 @@ void Preprocessor::EnterSourceFileWithPTH(PTHLexer *PL,
/// tokens from it instead of the current buffer.
void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
MacroInfo *Macro, MacroArgs *Args) {
- TokenLexer *TokLexer;
+ std::unique_ptr<TokenLexer> TokLexer;
if (NumCachedTokenLexers == 0) {
- TokLexer = new TokenLexer(Tok, ILEnd, Macro, Args, *this);
+ TokLexer = llvm::make_unique<TokenLexer>(Tok, ILEnd, Macro, Args, *this);
} else {
- TokLexer = TokenLexerCache[--NumCachedTokenLexers];
+ TokLexer = std::move(TokenLexerCache[--NumCachedTokenLexers]);
TokLexer->Init(Tok, ILEnd, Macro, Args);
}
PushIncludeMacroStack();
CurDirLookup = nullptr;
- CurTokenLexer.reset(TokLexer);
+ CurTokenLexer = std::move(TokLexer);
if (CurLexerKind != CLK_LexAfterModuleImport)
CurLexerKind = CLK_TokenLexer;
}
@@ -190,20 +190,39 @@ void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
bool DisableMacroExpansion,
bool OwnsTokens) {
+ if (CurLexerKind == CLK_CachingLexer) {
+ if (CachedLexPos < CachedTokens.size()) {
+ // We're entering tokens into the middle of our cached token stream. We
+ // can't represent that, so just insert the tokens into the buffer.
+ CachedTokens.insert(CachedTokens.begin() + CachedLexPos,
+ Toks, Toks + NumToks);
+ if (OwnsTokens)
+ delete [] Toks;
+ return;
+ }
+
+ // New tokens are at the end of the cached token sequence; insert the
+ // token stream underneath the caching lexer.
+ ExitCachingLexMode();
+ EnterTokenStream(Toks, NumToks, DisableMacroExpansion, OwnsTokens);
+ EnterCachingLexMode();
+ return;
+ }
+
// Create a macro expander to expand from the specified token stream.
- TokenLexer *TokLexer;
+ std::unique_ptr<TokenLexer> TokLexer;
if (NumCachedTokenLexers == 0) {
- TokLexer = new TokenLexer(Toks, NumToks, DisableMacroExpansion,
- OwnsTokens, *this);
+ TokLexer = llvm::make_unique<TokenLexer>(
+ Toks, NumToks, DisableMacroExpansion, OwnsTokens, *this);
} else {
- TokLexer = TokenLexerCache[--NumCachedTokenLexers];
+ TokLexer = std::move(TokenLexerCache[--NumCachedTokenLexers]);
TokLexer->Init(Toks, NumToks, DisableMacroExpansion, OwnsTokens);
}
// Save our current state.
PushIncludeMacroStack();
CurDirLookup = nullptr;
- CurTokenLexer.reset(TokLexer);
+ CurTokenLexer = std::move(TokLexer);
if (CurLexerKind != CLK_LexAfterModuleImport)
CurLexerKind = CLK_TokenLexer;
}
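
A stripped-down sketch of the two cases the new code distinguishes, with a plain vector standing in for CachedTokens and int standing in for Token (assumptions, not the real types):

    #include <cstddef>
    #include <vector>

    void enterTokens(std::vector<int> &Cached, std::size_t CachedLexPos,
                     const int *Toks, std::size_t NumToks) {
      if (CachedLexPos < Cached.size()) {
        // Mid-stream: splice the new tokens into the cached sequence.
        Cached.insert(Cached.begin() + CachedLexPos, Toks, Toks + NumToks);
        return;
      }
      // At the end of the cache: the real code instead exits caching mode,
      // pushes a TokenLexer for Toks, and re-enters caching mode.
    }
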
@@ -480,33 +499,6 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
}
}
}
-
- // Check whether there are any headers that were included, but not
- // mentioned at all in the module map. Such headers
- SourceLocation StartLoc
- = SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
- if (!getDiagnostics().isIgnored(diag::warn_forgotten_module_header,
- StartLoc)) {
- ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
- for (unsigned I = 0, N = SourceMgr.local_sloc_entry_size(); I != N; ++I) {
- // We only care about file entries.
- const SrcMgr::SLocEntry &Entry = SourceMgr.getLocalSLocEntry(I);
- if (!Entry.isFile())
- continue;
-
- // Dig out the actual file.
- const FileEntry *File = Entry.getFile().getContentCache()->OrigEntry;
- if (!File)
- continue;
-
- // If it's not part of a module and not unknown, complain.
- if (!ModMap.findModuleForHeader(File) &&
- !ModMap.isHeaderInUnavailableModule(File)) {
- Diag(StartLoc, diag::warn_forgotten_module_header)
- << File->getName() << Mod->getFullModuleName();
- }
- }
- }
}
return true;
@@ -526,7 +518,7 @@ bool Preprocessor::HandleEndOfTokenLexer(Token &Result) {
if (NumCachedTokenLexers == TokenLexerCacheSize)
CurTokenLexer.reset();
else
- TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.release();
+ TokenLexerCache[NumCachedTokenLexers++] = std::move(CurTokenLexer);
// Handle this like a #include file being popped off the stack.
return HandleEndOfFile(Result, true);
@@ -543,7 +535,7 @@ void Preprocessor::RemoveTopOfLexerStack() {
if (NumCachedTokenLexers == TokenLexerCacheSize)
CurTokenLexer.reset();
else
- TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.release();
+ TokenLexerCache[NumCachedTokenLexers++] = std::move(CurTokenLexer);
}
PopIncludeMacroStack();
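
The cache manipulation above now moves unique_ptrs instead of juggling raw pointers; a minimal sketch of that ownership pattern (int stands in for TokenLexer):

    #include <memory>
    #include <utility>

    struct LexerCache {
      static const unsigned Size = 8;
      std::unique_ptr<int> Slots[Size];
      unsigned Num = 0;
      void put(std::unique_ptr<int> L) {
        if (Num < Size)
          Slots[Num++] = std::move(L);   // otherwise L is destroyed here
      }
      std::unique_ptr<int> get() {
        if (Num == 0)
          return nullptr;
        return std::move(Slots[--Num]);
      }
    };
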
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index 2d7c6d4b5227..cd05d06633f2 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -49,7 +49,10 @@ void Preprocessor::appendMacroDirective(IdentifierInfo *II, MacroDirective *MD){
MacroDirective *&StoredMD = Macros[II];
MD->setPrevious(StoredMD);
StoredMD = MD;
- II->setHasMacroDefinition(MD->isDefined());
+ // Setup the identifier as having associated macro history.
+ II->setHasMacroDefinition(true);
+ if (!MD->isDefined())
+ II->setHasMacroDefinition(false);
bool isImportedMacro = isa<DefMacroDirective>(MD) &&
cast<DefMacroDirective>(MD)->isImported();
if (II->isFromAST() && !isImportedMacro)
@@ -93,6 +96,9 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__COUNTER__ = RegisterBuiltinMacro(*this, "__COUNTER__");
Ident_Pragma = RegisterBuiltinMacro(*this, "_Pragma");
+ // C++ Standing Document Extensions.
+ Ident__has_cpp_attribute = RegisterBuiltinMacro(*this, "__has_cpp_attribute");
+
// GCC Extensions.
Ident__BASE_FILE__ = RegisterBuiltinMacro(*this, "__BASE_FILE__");
Ident__INCLUDE_LEVEL__ = RegisterBuiltinMacro(*this, "__INCLUDE_LEVEL__");
@@ -112,6 +118,7 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__has_extension = RegisterBuiltinMacro(*this, "__has_extension");
Ident__has_builtin = RegisterBuiltinMacro(*this, "__has_builtin");
Ident__has_attribute = RegisterBuiltinMacro(*this, "__has_attribute");
+ Ident__has_declspec = RegisterBuiltinMacro(*this, "__has_declspec_attribute");
Ident__has_include = RegisterBuiltinMacro(*this, "__has_include");
Ident__has_include_next = RegisterBuiltinMacro(*this, "__has_include_next");
Ident__has_warning = RegisterBuiltinMacro(*this, "__has_warning");
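
Typical guarded uses of the two builtins registered above; the attribute names and the FALLTHROUGH/DLL_EXPORT macros are only examples, and the returned values vary by compiler and version:

    #ifdef __has_cpp_attribute
    #  if __has_cpp_attribute(clang::fallthrough)
    #    define FALLTHROUGH [[clang::fallthrough]]
    #  endif
    #endif
    #ifndef FALLTHROUGH
    #  define FALLTHROUGH
    #endif

    #ifdef __has_declspec_attribute
    #  if __has_declspec_attribute(dllexport)
    #    define DLL_EXPORT __declspec(dllexport)
    #  endif
    #endif
    #ifndef DLL_EXPORT
    #  define DLL_EXPORT
    #endif
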
@@ -857,144 +864,148 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
Feature = Feature.substr(2, Feature.size() - 4);
return llvm::StringSwitch<bool>(Feature)
- .Case("address_sanitizer", LangOpts.Sanitize.Address)
- .Case("attribute_analyzer_noreturn", true)
- .Case("attribute_availability", true)
- .Case("attribute_availability_with_message", true)
- .Case("attribute_cf_returns_not_retained", true)
- .Case("attribute_cf_returns_retained", true)
- .Case("attribute_deprecated_with_message", true)
- .Case("attribute_ext_vector_type", true)
- .Case("attribute_ns_returns_not_retained", true)
- .Case("attribute_ns_returns_retained", true)
- .Case("attribute_ns_consumes_self", true)
- .Case("attribute_ns_consumed", true)
- .Case("attribute_cf_consumed", true)
- .Case("attribute_objc_ivar_unused", true)
- .Case("attribute_objc_method_family", true)
- .Case("attribute_overloadable", true)
- .Case("attribute_unavailable_with_message", true)
- .Case("attribute_unused_on_fields", true)
- .Case("blocks", LangOpts.Blocks)
- .Case("c_thread_safety_attributes", true)
- .Case("cxx_exceptions", LangOpts.CXXExceptions)
- .Case("cxx_rtti", LangOpts.RTTI)
- .Case("enumerator_attributes", true)
- .Case("memory_sanitizer", LangOpts.Sanitize.Memory)
- .Case("thread_sanitizer", LangOpts.Sanitize.Thread)
- .Case("dataflow_sanitizer", LangOpts.Sanitize.DataFlow)
- // Objective-C features
- .Case("objc_arr", LangOpts.ObjCAutoRefCount) // FIXME: REMOVE?
- .Case("objc_arc", LangOpts.ObjCAutoRefCount)
- .Case("objc_arc_weak", LangOpts.ObjCARCWeak)
- .Case("objc_default_synthesize_properties", LangOpts.ObjC2)
- .Case("objc_fixed_enum", LangOpts.ObjC2)
- .Case("objc_instancetype", LangOpts.ObjC2)
- .Case("objc_modules", LangOpts.ObjC2 && LangOpts.Modules)
- .Case("objc_nonfragile_abi", LangOpts.ObjCRuntime.isNonFragile())
- .Case("objc_property_explicit_atomic", true) // Does clang support explicit "atomic" keyword?
- .Case("objc_protocol_qualifier_mangling", true)
- .Case("objc_weak_class", LangOpts.ObjCRuntime.hasWeakClassImport())
- .Case("ownership_holds", true)
- .Case("ownership_returns", true)
- .Case("ownership_takes", true)
- .Case("objc_bool", true)
- .Case("objc_subscripting", LangOpts.ObjCRuntime.isNonFragile())
- .Case("objc_array_literals", LangOpts.ObjC2)
- .Case("objc_dictionary_literals", LangOpts.ObjC2)
- .Case("objc_boxed_expressions", LangOpts.ObjC2)
- .Case("arc_cf_code_audited", true)
- // C11 features
- .Case("c_alignas", LangOpts.C11)
- .Case("c_atomic", LangOpts.C11)
- .Case("c_generic_selections", LangOpts.C11)
- .Case("c_static_assert", LangOpts.C11)
- .Case("c_thread_local",
- LangOpts.C11 && PP.getTargetInfo().isTLSSupported())
- // C++11 features
- .Case("cxx_access_control_sfinae", LangOpts.CPlusPlus11)
- .Case("cxx_alias_templates", LangOpts.CPlusPlus11)
- .Case("cxx_alignas", LangOpts.CPlusPlus11)
- .Case("cxx_atomic", LangOpts.CPlusPlus11)
- .Case("cxx_attributes", LangOpts.CPlusPlus11)
- .Case("cxx_auto_type", LangOpts.CPlusPlus11)
- .Case("cxx_constexpr", LangOpts.CPlusPlus11)
- .Case("cxx_decltype", LangOpts.CPlusPlus11)
- .Case("cxx_decltype_incomplete_return_types", LangOpts.CPlusPlus11)
- .Case("cxx_default_function_template_args", LangOpts.CPlusPlus11)
- .Case("cxx_defaulted_functions", LangOpts.CPlusPlus11)
- .Case("cxx_delegating_constructors", LangOpts.CPlusPlus11)
- .Case("cxx_deleted_functions", LangOpts.CPlusPlus11)
- .Case("cxx_explicit_conversions", LangOpts.CPlusPlus11)
- .Case("cxx_generalized_initializers", LangOpts.CPlusPlus11)
- .Case("cxx_implicit_moves", LangOpts.CPlusPlus11)
- .Case("cxx_inheriting_constructors", LangOpts.CPlusPlus11)
- .Case("cxx_inline_namespaces", LangOpts.CPlusPlus11)
- .Case("cxx_lambdas", LangOpts.CPlusPlus11)
- .Case("cxx_local_type_template_args", LangOpts.CPlusPlus11)
- .Case("cxx_nonstatic_member_init", LangOpts.CPlusPlus11)
- .Case("cxx_noexcept", LangOpts.CPlusPlus11)
- .Case("cxx_nullptr", LangOpts.CPlusPlus11)
- .Case("cxx_override_control", LangOpts.CPlusPlus11)
- .Case("cxx_range_for", LangOpts.CPlusPlus11)
- .Case("cxx_raw_string_literals", LangOpts.CPlusPlus11)
- .Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus11)
- .Case("cxx_rvalue_references", LangOpts.CPlusPlus11)
- .Case("cxx_strong_enums", LangOpts.CPlusPlus11)
- .Case("cxx_static_assert", LangOpts.CPlusPlus11)
- .Case("cxx_thread_local",
- LangOpts.CPlusPlus11 && PP.getTargetInfo().isTLSSupported())
- .Case("cxx_trailing_return", LangOpts.CPlusPlus11)
- .Case("cxx_unicode_literals", LangOpts.CPlusPlus11)
- .Case("cxx_unrestricted_unions", LangOpts.CPlusPlus11)
- .Case("cxx_user_literals", LangOpts.CPlusPlus11)
- .Case("cxx_variadic_templates", LangOpts.CPlusPlus11)
- // C++1y features
- .Case("cxx_aggregate_nsdmi", LangOpts.CPlusPlus1y)
- .Case("cxx_binary_literals", LangOpts.CPlusPlus1y)
- .Case("cxx_contextual_conversions", LangOpts.CPlusPlus1y)
- .Case("cxx_decltype_auto", LangOpts.CPlusPlus1y)
- .Case("cxx_generic_lambdas", LangOpts.CPlusPlus1y)
- .Case("cxx_init_captures", LangOpts.CPlusPlus1y)
- .Case("cxx_relaxed_constexpr", LangOpts.CPlusPlus1y)
- .Case("cxx_return_type_deduction", LangOpts.CPlusPlus1y)
- .Case("cxx_variable_templates", LangOpts.CPlusPlus1y)
- // C++ TSes
- //.Case("cxx_runtime_arrays", LangOpts.CPlusPlusTSArrays)
- //.Case("cxx_concepts", LangOpts.CPlusPlusTSConcepts)
- // FIXME: Should this be __has_feature or __has_extension?
- //.Case("raw_invocation_type", LangOpts.CPlusPlus)
- // Type traits
- .Case("has_nothrow_assign", LangOpts.CPlusPlus)
- .Case("has_nothrow_copy", LangOpts.CPlusPlus)
- .Case("has_nothrow_constructor", LangOpts.CPlusPlus)
- .Case("has_trivial_assign", LangOpts.CPlusPlus)
- .Case("has_trivial_copy", LangOpts.CPlusPlus)
- .Case("has_trivial_constructor", LangOpts.CPlusPlus)
- .Case("has_trivial_destructor", LangOpts.CPlusPlus)
- .Case("has_virtual_destructor", LangOpts.CPlusPlus)
- .Case("is_abstract", LangOpts.CPlusPlus)
- .Case("is_base_of", LangOpts.CPlusPlus)
- .Case("is_class", LangOpts.CPlusPlus)
- .Case("is_constructible", LangOpts.CPlusPlus)
- .Case("is_convertible_to", LangOpts.CPlusPlus)
- .Case("is_empty", LangOpts.CPlusPlus)
- .Case("is_enum", LangOpts.CPlusPlus)
- .Case("is_final", LangOpts.CPlusPlus)
- .Case("is_literal", LangOpts.CPlusPlus)
- .Case("is_standard_layout", LangOpts.CPlusPlus)
- .Case("is_pod", LangOpts.CPlusPlus)
- .Case("is_polymorphic", LangOpts.CPlusPlus)
- .Case("is_sealed", LangOpts.MicrosoftExt)
- .Case("is_trivial", LangOpts.CPlusPlus)
- .Case("is_trivially_assignable", LangOpts.CPlusPlus)
- .Case("is_trivially_constructible", LangOpts.CPlusPlus)
- .Case("is_trivially_copyable", LangOpts.CPlusPlus)
- .Case("is_union", LangOpts.CPlusPlus)
- .Case("modules", LangOpts.Modules)
- .Case("tls", PP.getTargetInfo().isTLSSupported())
- .Case("underlying_type", LangOpts.CPlusPlus)
- .Default(false);
+ .Case("address_sanitizer", LangOpts.Sanitize.has(SanitizerKind::Address))
+ .Case("attribute_analyzer_noreturn", true)
+ .Case("attribute_availability", true)
+ .Case("attribute_availability_with_message", true)
+ .Case("attribute_cf_returns_not_retained", true)
+ .Case("attribute_cf_returns_retained", true)
+ .Case("attribute_deprecated_with_message", true)
+ .Case("attribute_ext_vector_type", true)
+ .Case("attribute_ns_returns_not_retained", true)
+ .Case("attribute_ns_returns_retained", true)
+ .Case("attribute_ns_consumes_self", true)
+ .Case("attribute_ns_consumed", true)
+ .Case("attribute_cf_consumed", true)
+ .Case("attribute_objc_ivar_unused", true)
+ .Case("attribute_objc_method_family", true)
+ .Case("attribute_overloadable", true)
+ .Case("attribute_unavailable_with_message", true)
+ .Case("attribute_unused_on_fields", true)
+ .Case("blocks", LangOpts.Blocks)
+ .Case("c_thread_safety_attributes", true)
+ .Case("cxx_exceptions", LangOpts.CXXExceptions)
+ .Case("cxx_rtti", LangOpts.RTTI)
+ .Case("enumerator_attributes", true)
+ .Case("memory_sanitizer", LangOpts.Sanitize.has(SanitizerKind::Memory))
+ .Case("thread_sanitizer", LangOpts.Sanitize.has(SanitizerKind::Thread))
+ .Case("dataflow_sanitizer", LangOpts.Sanitize.has(SanitizerKind::DataFlow))
+ // Objective-C features
+ .Case("objc_arr", LangOpts.ObjCAutoRefCount) // FIXME: REMOVE?
+ .Case("objc_arc", LangOpts.ObjCAutoRefCount)
+ .Case("objc_arc_weak", LangOpts.ObjCARCWeak)
+ .Case("objc_default_synthesize_properties", LangOpts.ObjC2)
+ .Case("objc_fixed_enum", LangOpts.ObjC2)
+ .Case("objc_instancetype", LangOpts.ObjC2)
+ .Case("objc_modules", LangOpts.ObjC2 && LangOpts.Modules)
+ .Case("objc_nonfragile_abi", LangOpts.ObjCRuntime.isNonFragile())
+ .Case("objc_property_explicit_atomic",
+ true) // Does clang support explicit "atomic" keyword?
+ .Case("objc_protocol_qualifier_mangling", true)
+ .Case("objc_weak_class", LangOpts.ObjCRuntime.hasWeakClassImport())
+ .Case("ownership_holds", true)
+ .Case("ownership_returns", true)
+ .Case("ownership_takes", true)
+ .Case("objc_bool", true)
+ .Case("objc_subscripting", LangOpts.ObjCRuntime.isNonFragile())
+ .Case("objc_array_literals", LangOpts.ObjC2)
+ .Case("objc_dictionary_literals", LangOpts.ObjC2)
+ .Case("objc_boxed_expressions", LangOpts.ObjC2)
+ .Case("arc_cf_code_audited", true)
+ .Case("objc_bridge_id", LangOpts.ObjC2)
+ // C11 features
+ .Case("c_alignas", LangOpts.C11)
+ .Case("c_alignof", LangOpts.C11)
+ .Case("c_atomic", LangOpts.C11)
+ .Case("c_generic_selections", LangOpts.C11)
+ .Case("c_static_assert", LangOpts.C11)
+ .Case("c_thread_local",
+ LangOpts.C11 && PP.getTargetInfo().isTLSSupported())
+ // C++11 features
+ .Case("cxx_access_control_sfinae", LangOpts.CPlusPlus11)
+ .Case("cxx_alias_templates", LangOpts.CPlusPlus11)
+ .Case("cxx_alignas", LangOpts.CPlusPlus11)
+ .Case("cxx_alignof", LangOpts.CPlusPlus11)
+ .Case("cxx_atomic", LangOpts.CPlusPlus11)
+ .Case("cxx_attributes", LangOpts.CPlusPlus11)
+ .Case("cxx_auto_type", LangOpts.CPlusPlus11)
+ .Case("cxx_constexpr", LangOpts.CPlusPlus11)
+ .Case("cxx_decltype", LangOpts.CPlusPlus11)
+ .Case("cxx_decltype_incomplete_return_types", LangOpts.CPlusPlus11)
+ .Case("cxx_default_function_template_args", LangOpts.CPlusPlus11)
+ .Case("cxx_defaulted_functions", LangOpts.CPlusPlus11)
+ .Case("cxx_delegating_constructors", LangOpts.CPlusPlus11)
+ .Case("cxx_deleted_functions", LangOpts.CPlusPlus11)
+ .Case("cxx_explicit_conversions", LangOpts.CPlusPlus11)
+ .Case("cxx_generalized_initializers", LangOpts.CPlusPlus11)
+ .Case("cxx_implicit_moves", LangOpts.CPlusPlus11)
+ .Case("cxx_inheriting_constructors", LangOpts.CPlusPlus11)
+ .Case("cxx_inline_namespaces", LangOpts.CPlusPlus11)
+ .Case("cxx_lambdas", LangOpts.CPlusPlus11)
+ .Case("cxx_local_type_template_args", LangOpts.CPlusPlus11)
+ .Case("cxx_nonstatic_member_init", LangOpts.CPlusPlus11)
+ .Case("cxx_noexcept", LangOpts.CPlusPlus11)
+ .Case("cxx_nullptr", LangOpts.CPlusPlus11)
+ .Case("cxx_override_control", LangOpts.CPlusPlus11)
+ .Case("cxx_range_for", LangOpts.CPlusPlus11)
+ .Case("cxx_raw_string_literals", LangOpts.CPlusPlus11)
+ .Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus11)
+ .Case("cxx_rvalue_references", LangOpts.CPlusPlus11)
+ .Case("cxx_strong_enums", LangOpts.CPlusPlus11)
+ .Case("cxx_static_assert", LangOpts.CPlusPlus11)
+ .Case("cxx_thread_local",
+ LangOpts.CPlusPlus11 && PP.getTargetInfo().isTLSSupported())
+ .Case("cxx_trailing_return", LangOpts.CPlusPlus11)
+ .Case("cxx_unicode_literals", LangOpts.CPlusPlus11)
+ .Case("cxx_unrestricted_unions", LangOpts.CPlusPlus11)
+ .Case("cxx_user_literals", LangOpts.CPlusPlus11)
+ .Case("cxx_variadic_templates", LangOpts.CPlusPlus11)
+ // C++1y features
+ .Case("cxx_aggregate_nsdmi", LangOpts.CPlusPlus14)
+ .Case("cxx_binary_literals", LangOpts.CPlusPlus14)
+ .Case("cxx_contextual_conversions", LangOpts.CPlusPlus14)
+ .Case("cxx_decltype_auto", LangOpts.CPlusPlus14)
+ .Case("cxx_generic_lambdas", LangOpts.CPlusPlus14)
+ .Case("cxx_init_captures", LangOpts.CPlusPlus14)
+ .Case("cxx_relaxed_constexpr", LangOpts.CPlusPlus14)
+ .Case("cxx_return_type_deduction", LangOpts.CPlusPlus14)
+ .Case("cxx_variable_templates", LangOpts.CPlusPlus14)
+ // C++ TSes
+ //.Case("cxx_runtime_arrays", LangOpts.CPlusPlusTSArrays)
+ //.Case("cxx_concepts", LangOpts.CPlusPlusTSConcepts)
+ // FIXME: Should this be __has_feature or __has_extension?
+ //.Case("raw_invocation_type", LangOpts.CPlusPlus)
+ // Type traits
+ .Case("has_nothrow_assign", LangOpts.CPlusPlus)
+ .Case("has_nothrow_copy", LangOpts.CPlusPlus)
+ .Case("has_nothrow_constructor", LangOpts.CPlusPlus)
+ .Case("has_trivial_assign", LangOpts.CPlusPlus)
+ .Case("has_trivial_copy", LangOpts.CPlusPlus)
+ .Case("has_trivial_constructor", LangOpts.CPlusPlus)
+ .Case("has_trivial_destructor", LangOpts.CPlusPlus)
+ .Case("has_virtual_destructor", LangOpts.CPlusPlus)
+ .Case("is_abstract", LangOpts.CPlusPlus)
+ .Case("is_base_of", LangOpts.CPlusPlus)
+ .Case("is_class", LangOpts.CPlusPlus)
+ .Case("is_constructible", LangOpts.CPlusPlus)
+ .Case("is_convertible_to", LangOpts.CPlusPlus)
+ .Case("is_empty", LangOpts.CPlusPlus)
+ .Case("is_enum", LangOpts.CPlusPlus)
+ .Case("is_final", LangOpts.CPlusPlus)
+ .Case("is_literal", LangOpts.CPlusPlus)
+ .Case("is_standard_layout", LangOpts.CPlusPlus)
+ .Case("is_pod", LangOpts.CPlusPlus)
+ .Case("is_polymorphic", LangOpts.CPlusPlus)
+ .Case("is_sealed", LangOpts.MicrosoftExt)
+ .Case("is_trivial", LangOpts.CPlusPlus)
+ .Case("is_trivially_assignable", LangOpts.CPlusPlus)
+ .Case("is_trivially_constructible", LangOpts.CPlusPlus)
+ .Case("is_trivially_copyable", LangOpts.CPlusPlus)
+ .Case("is_union", LangOpts.CPlusPlus)
+ .Case("modules", LangOpts.Modules)
+ .Case("tls", PP.getTargetInfo().isTLSSupported())
+ .Case("underlying_type", LangOpts.CPlusPlus)
+ .Default(false);
}
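
A typical consumer-side guard for the feature table above, so that compilers without the builtin still build (MYLIB_CONSTEXPR is just an example name):

    #ifndef __has_feature
    #  define __has_feature(x) 0
    #endif

    #if __has_feature(cxx_constexpr)
    #  define MYLIB_CONSTEXPR constexpr
    #else
    #  define MYLIB_CONSTEXPR
    #endif
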
/// HasExtension - Return true if we recognize and implement the feature
@@ -1023,6 +1034,7 @@ static bool HasExtension(const Preprocessor &PP, const IdentifierInfo *II) {
return llvm::StringSwitch<bool>(Extension)
// C11 features supported by other languages as extensions.
.Case("c_alignas", true)
+ .Case("c_alignof", true)
.Case("c_atomic", true)
.Case("c_generic_selections", true)
.Case("c_static_assert", true)
@@ -1050,7 +1062,8 @@ static bool HasExtension(const Preprocessor &PP, const IdentifierInfo *II) {
/// Returns true if successful.
static bool EvaluateHasIncludeCommon(Token &Tok,
IdentifierInfo *II, Preprocessor &PP,
- const DirectoryLookup *LookupFrom) {
+ const DirectoryLookup *LookupFrom,
+ const FileEntry *LookupFromFile) {
// Save the location of the current token. If a '(' is later found, use
// that location. If not, use the end of this location instead.
SourceLocation LParenLoc = Tok.getLocation();
@@ -1145,8 +1158,8 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
// Search include directories.
const DirectoryLookup *CurDir;
const FileEntry *File =
- PP.LookupFile(FilenameLoc, Filename, isAngled, LookupFrom, CurDir,
- nullptr, nullptr, nullptr);
+ PP.LookupFile(FilenameLoc, Filename, isAngled, LookupFrom, LookupFromFile,
+ CurDir, nullptr, nullptr, nullptr);
// Get the result value. A result of true means the file exists.
return File != nullptr;
@@ -1156,7 +1169,7 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
/// Returns true if successful.
static bool EvaluateHasInclude(Token &Tok, IdentifierInfo *II,
Preprocessor &PP) {
- return EvaluateHasIncludeCommon(Tok, II, PP, nullptr);
+ return EvaluateHasIncludeCommon(Tok, II, PP, nullptr, nullptr);
}
/// EvaluateHasIncludeNext - Process '__has_include_next("path")' expression.
@@ -1166,10 +1179,19 @@ static bool EvaluateHasIncludeNext(Token &Tok,
// __has_include_next is like __has_include, except that we start
// searching after the current found directory. If we can't do this,
// issue a diagnostic.
+ // FIXME: Factor out duplication with
+ // Preprocessor::HandleIncludeNextDirective.
const DirectoryLookup *Lookup = PP.GetCurDirLookup();
+ const FileEntry *LookupFromFile = nullptr;
if (PP.isInPrimaryFile()) {
Lookup = nullptr;
PP.Diag(Tok, diag::pp_include_next_in_primary);
+ } else if (PP.getCurrentSubmodule()) {
+ // Start looking up in the directory *after* the one in which the current
+ // file would be found, if any.
+ assert(PP.getCurrentLexer() && "#include_next directive in macro?");
+ LookupFromFile = PP.getCurrentLexer()->getFileEntry();
+ Lookup = nullptr;
} else if (!Lookup) {
PP.Diag(Tok, diag::pp_include_next_absolute_path);
} else {
@@ -1177,7 +1199,7 @@ static bool EvaluateHasIncludeNext(Token &Tok,
++Lookup;
}
- return EvaluateHasIncludeCommon(Tok, II, PP, Lookup);
+ return EvaluateHasIncludeCommon(Tok, II, PP, Lookup, LookupFromFile);
}
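
For context, the expression evaluated here typically appears in a wrapper header that shadows a system header, e.g.:

    /* wrapper <stddef.h>: forward to the real header only if one exists later on the path */
    #ifdef __has_include_next
    #  if __has_include_next(<stddef.h>)
    #    include_next <stddef.h>
    #  endif
    #else
    #  include_next <stddef.h>
    #endif
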
/// \brief Process __building_module(identifier) expression.
@@ -1360,12 +1382,15 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
II == Ident__has_extension ||
II == Ident__has_builtin ||
II == Ident__is_identifier ||
- II == Ident__has_attribute) {
+ II == Ident__has_attribute ||
+ II == Ident__has_declspec ||
+ II == Ident__has_cpp_attribute) {
// The argument to these builtins should be a parenthesized identifier.
SourceLocation StartLoc = Tok.getLocation();
bool IsValid = false;
IdentifierInfo *FeatureII = nullptr;
+ IdentifierInfo *ScopeII = nullptr;
// Read the '('.
LexUnexpandedToken(Tok);
@@ -1373,14 +1398,30 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// Read the identifier
LexUnexpandedToken(Tok);
if ((FeatureII = Tok.getIdentifierInfo())) {
- // Read the ')'.
+ // If we're checking __has_cpp_attribute, it is possible to receive a
+ // scope token. Read the "::", if it's available.
LexUnexpandedToken(Tok);
- if (Tok.is(tok::r_paren))
+ bool IsScopeValid = true;
+ if (II == Ident__has_cpp_attribute && Tok.is(tok::coloncolon)) {
+ LexUnexpandedToken(Tok);
+ // The first thing we read was not the feature, it was the scope.
+ ScopeII = FeatureII;
+ if ((FeatureII = Tok.getIdentifierInfo()))
+ LexUnexpandedToken(Tok);
+ else
+ IsScopeValid = false;
+ }
+ // Read the closing paren.
+ if (IsScopeValid && Tok.is(tok::r_paren))
IsValid = true;
}
+ // Eat tokens until ')'.
+ while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::eod) &&
+ Tok.isNot(tok::eof))
+ LexUnexpandedToken(Tok);
}
- bool Value = false;
+ int Value = 0;
if (!IsValid)
Diag(StartLoc, diag::err_feature_check_malformed);
else if (II == Ident__is_identifier)
@@ -1389,7 +1430,13 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// Check for a builtin is trivial.
Value = FeatureII->getBuiltinID() != 0;
} else if (II == Ident__has_attribute)
- Value = hasAttribute(AttrSyntax::Generic, nullptr, FeatureII,
+ Value = hasAttribute(AttrSyntax::GNU, nullptr, FeatureII,
+ getTargetInfo().getTriple(), getLangOpts());
+ else if (II == Ident__has_cpp_attribute)
+ Value = hasAttribute(AttrSyntax::CXX, ScopeII, FeatureII,
+ getTargetInfo().getTriple(), getLangOpts());
+ else if (II == Ident__has_declspec)
+ Value = hasAttribute(AttrSyntax::Declspec, nullptr, FeatureII,
getTargetInfo().getTriple(), getLangOpts());
else if (II == Ident__has_extension)
Value = HasExtension(*this, FeatureII);
@@ -1398,9 +1445,10 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
Value = HasFeature(*this, FeatureII);
}
- OS << (int)Value;
- if (IsValid)
- Tok.setKind(tok::numeric_constant);
+ if (!IsValid)
+ return;
+ OS << Value;
+ Tok.setKind(tok::numeric_constant);
} else if (II == Ident__has_include ||
II == Ident__has_include_next) {
// The argument to these two builtins should be a parenthesized
@@ -1464,9 +1512,10 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
WarningName.substr(2), Diags);
} while (false);
+ if (!IsValid)
+ return;
OS << (int)Value;
- if (IsValid)
- Tok.setKind(tok::numeric_constant);
+ Tok.setKind(tok::numeric_constant);
} else if (II == Ident__building_module) {
// The argument to this builtin should be an identifier. The
// builtin evaluates to 1 when that identifier names the module we are
diff --git a/lib/Lex/PTHLexer.cpp b/lib/Lex/PTHLexer.cpp
index fce31c4be296..af7a15384e35 100644
--- a/lib/Lex/PTHLexer.cpp
+++ b/lib/Lex/PTHLexer.cpp
@@ -24,7 +24,6 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/OnDiskHashTable.h"
#include <memory>
#include <system_error>
using namespace clang;
@@ -342,7 +341,9 @@ public:
}
};
-class PTHFileLookupTrait : public PTHFileLookupCommonTrait {
+} // end anonymous namespace
+
+class PTHManager::PTHFileLookupTrait : public PTHFileLookupCommonTrait {
public:
typedef const FileEntry* external_key_type;
typedef PTHFileData data_type;
@@ -365,7 +366,7 @@ public:
}
};
-class PTHStringLookupTrait {
+class PTHManager::PTHStringLookupTrait {
public:
typedef uint32_t data_type;
typedef const std::pair<const char*, unsigned> external_key_type;
@@ -408,31 +409,22 @@ public:
}
};
-} // end anonymous namespace
-
-typedef llvm::OnDiskChainedHashTable<PTHFileLookupTrait> PTHFileLookup;
-typedef llvm::OnDiskChainedHashTable<PTHStringLookupTrait> PTHStringIdLookup;
-
//===----------------------------------------------------------------------===//
// PTHManager methods.
//===----------------------------------------------------------------------===//
-PTHManager::PTHManager(const llvm::MemoryBuffer* buf, void* fileLookup,
- const unsigned char* idDataTable,
- IdentifierInfo** perIDCache,
- void* stringIdLookup, unsigned numIds,
- const unsigned char* spellingBase,
- const char* originalSourceFile)
-: Buf(buf), PerIDCache(perIDCache), FileLookup(fileLookup),
- IdDataTable(idDataTable), StringIdLookup(stringIdLookup),
- NumIds(numIds), PP(nullptr), SpellingBase(spellingBase),
- OriginalSourceFile(originalSourceFile) {}
+PTHManager::PTHManager(
+ std::unique_ptr<const llvm::MemoryBuffer> buf,
+ std::unique_ptr<PTHFileLookup> fileLookup, const unsigned char *idDataTable,
+ std::unique_ptr<IdentifierInfo *[], llvm::FreeDeleter> perIDCache,
+ std::unique_ptr<PTHStringIdLookup> stringIdLookup, unsigned numIds,
+ const unsigned char *spellingBase, const char *originalSourceFile)
+ : Buf(std::move(buf)), PerIDCache(std::move(perIDCache)),
+ FileLookup(std::move(fileLookup)), IdDataTable(idDataTable),
+ StringIdLookup(std::move(stringIdLookup)), NumIds(numIds), PP(nullptr),
+ SpellingBase(spellingBase), OriginalSourceFile(originalSourceFile) {}
PTHManager::~PTHManager() {
- delete Buf;
- delete (PTHFileLookup*) FileLookup;
- delete (PTHStringIdLookup*) StringIdLookup;
- free(PerIDCache);
}
static void InvalidPTH(DiagnosticsEngine &Diags, const char *Msg) {
@@ -543,10 +535,10 @@ PTHManager *PTHManager::Create(const std::string &file,
// Pre-allocate the persistent ID -> IdentifierInfo* cache. We use calloc()
// so that we in the best case only zero out memory once when the OS returns
// us new pages.
- IdentifierInfo **PerIDCache = nullptr;
+ std::unique_ptr<IdentifierInfo *[], llvm::FreeDeleter> PerIDCache;
if (NumIds) {
- PerIDCache = (IdentifierInfo**)calloc(NumIds, sizeof(*PerIDCache));
+ PerIDCache.reset((IdentifierInfo **)calloc(NumIds, sizeof(PerIDCache[0])));
if (!PerIDCache) {
InvalidPTH(Diags, "Could not allocate memory for processing PTH file");
return nullptr;
@@ -560,9 +552,9 @@ PTHManager *PTHManager::Create(const std::string &file,
if (!len) originalSourceBase = nullptr;
// Create the new PTHManager.
- return new PTHManager(File.release(), FL.release(), IData, PerIDCache,
- SL.release(), NumIds, spellingBase,
- (const char *)originalSourceBase);
+ return new PTHManager(std::move(File), std::move(FL), IData,
+ std::move(PerIDCache), std::move(SL), NumIds,
+ spellingBase, (const char *)originalSourceBase);
}
IdentifierInfo* PTHManager::LazilyCreateIdentifierInfo(unsigned PersistentID) {
@@ -589,12 +581,11 @@ IdentifierInfo* PTHManager::LazilyCreateIdentifierInfo(unsigned PersistentID) {
}
IdentifierInfo* PTHManager::get(StringRef Name) {
- PTHStringIdLookup& SL = *((PTHStringIdLookup*)StringIdLookup);
// Double check our assumption that the last character isn't '\0'.
assert(Name.empty() || Name.back() != '\0');
- PTHStringIdLookup::iterator I = SL.find(std::make_pair(Name.data(),
- Name.size()));
- if (I == SL.end()) // No identifier found?
+ PTHStringIdLookup::iterator I =
+ StringIdLookup->find(std::make_pair(Name.data(), Name.size()));
+ if (I == StringIdLookup->end()) // No identifier found?
return nullptr;
// Match found. Return the identifier!
@@ -612,10 +603,9 @@ PTHLexer *PTHManager::CreateLexer(FileID FID) {
// Lookup the FileEntry object in our file lookup data structure. It will
// return a variant that indicates whether or not there is an offset within
// the PTH file that contains cached tokens.
- PTHFileLookup& PFL = *((PTHFileLookup*)FileLookup);
- PTHFileLookup::iterator I = PFL.find(FE);
+ PTHFileLookup::iterator I = FileLookup->find(FE);
- if (I == PFL.end()) // No tokens available?
+ if (I == FileLookup->end()) // No tokens available?
return nullptr;
const PTHFileData& FileData = *I;
@@ -684,7 +674,7 @@ public:
uint64_t File = endian::readNext<uint64_t, little, unaligned>(d);
uint64_t Device = endian::readNext<uint64_t, little, unaligned>(d);
- llvm::sys::fs::UniqueID UniqueID(File, Device);
+ llvm::sys::fs::UniqueID UniqueID(Device, File);
time_t ModTime = endian::readNext<uint64_t, little, unaligned>(d);
uint64_t Size = endian::readNext<uint64_t, little, unaligned>(d);
return data_type(Size, ModTime, UniqueID, IsDirectory);
@@ -694,15 +684,17 @@ public:
return data_type();
}
};
+} // end anonymous namespace
+namespace clang {
class PTHStatCache : public FileSystemStatCache {
typedef llvm::OnDiskChainedHashTable<PTHStatLookupTrait> CacheTy;
CacheTy Cache;
public:
- PTHStatCache(PTHFileLookup &FL) :
- Cache(FL.getNumBuckets(), FL.getNumEntries(), FL.getBuckets(),
- FL.getBase()) {}
+ PTHStatCache(PTHManager::PTHFileLookup &FL)
+ : Cache(FL.getNumBuckets(), FL.getNumEntries(), FL.getBuckets(),
+ FL.getBase()) {}
LookupResult getStat(const char *Path, FileData &Data, bool isFile,
std::unique_ptr<vfs::File> *F,
@@ -730,8 +722,8 @@ public:
return CacheExists;
}
};
-} // end anonymous namespace
+}
-FileSystemStatCache *PTHManager::createStatCache() {
- return new PTHStatCache(*((PTHFileLookup*) FileLookup));
+std::unique_ptr<FileSystemStatCache> PTHManager::createStatCache() {
+ return llvm::make_unique<PTHStatCache>(*FileLookup);
}
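
PerIDCache above is calloc()'d, which is why its unique_ptr uses llvm::FreeDeleter. A self-contained stand-in showing the same pattern (FreeDeleter here is a local sketch, not the LLVM type):

    #include <cstddef>
    #include <cstdlib>
    #include <memory>

    struct FreeDeleter {
      void operator()(void *P) const { std::free(P); }
    };

    using IDCache = std::unique_ptr<void *[], FreeDeleter>;

    static IDCache makeCache(std::size_t N) {
      return IDCache(static_cast<void **>(std::calloc(N, sizeof(void *))));
    }
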
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index cf76bdb7a913..8ed832893771 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -65,9 +65,7 @@ PragmaHandler *PragmaNamespace::FindHandler(StringRef Name,
void PragmaNamespace::AddPragma(PragmaHandler *Handler) {
assert(!Handlers.lookup(Handler->getName()) &&
"A handler with this name is already registered in this namespace");
- llvm::StringMapEntry<PragmaHandler *> &Entry =
- Handlers.GetOrCreateValue(Handler->getName());
- Entry.setValue(Handler);
+ Handlers[Handler->getName()] = Handler;
}
void PragmaNamespace::RemovePragmaHandler(PragmaHandler *Handler) {
@@ -193,7 +191,7 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
if (!tok::isStringLiteral(Tok.getKind())) {
Diag(PragmaLoc, diag::err__Pragma_malformed);
// Skip this token, and the ')', if present.
- if (Tok.isNot(tok::r_paren))
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::eof))
Lex(Tok);
if (Tok.is(tok::r_paren))
Lex(Tok);
@@ -472,9 +470,9 @@ void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
// Search include directories for this file.
const DirectoryLookup *CurDir;
- const FileEntry *File = LookupFile(FilenameTok.getLocation(), Filename,
- isAngled, nullptr, CurDir, nullptr,
- nullptr, nullptr);
+ const FileEntry *File =
+ LookupFile(FilenameTok.getLocation(), Filename, isAngled, nullptr,
+ nullptr, CurDir, nullptr, nullptr, nullptr);
if (!File) {
if (!SuppressIncludeNotFoundError)
Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
@@ -605,7 +603,7 @@ void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) {
if (MacroToReInstall) {
// Reinstall the previously pushed macro.
appendDefMacroDirective(IdentInfo, MacroToReInstall, MessageLoc,
- /*isImported=*/false);
+ /*isImported=*/false, /*Overrides*/None);
}
// Pop PragmaPushMacroInfo stack.
@@ -728,7 +726,7 @@ void Preprocessor::HandlePragmaIncludeAlias(Token &Tok) {
/// pragma line before the pragma string starts, e.g. "STDC" or "GCC".
void Preprocessor::AddPragmaHandler(StringRef Namespace,
PragmaHandler *Handler) {
- PragmaNamespace *InsertNS = PragmaHandlers;
+ PragmaNamespace *InsertNS = PragmaHandlers.get();
// If this is specified to be in a namespace, step down into it.
if (!Namespace.empty()) {
@@ -759,7 +757,7 @@ void Preprocessor::AddPragmaHandler(StringRef Namespace,
/// a handler that has not been registered.
void Preprocessor::RemovePragmaHandler(StringRef Namespace,
PragmaHandler *Handler) {
- PragmaNamespace *NS = PragmaHandlers;
+ PragmaNamespace *NS = PragmaHandlers.get();
// If this is specified to be in a namespace, step down into it.
if (!Namespace.empty()) {
@@ -772,9 +770,8 @@ void Preprocessor::RemovePragmaHandler(StringRef Namespace,
NS->RemovePragmaHandler(Handler);
- // If this is a non-default namespace and it is now empty, remove
- // it.
- if (NS != PragmaHandlers && NS->IsEmpty()) {
+ // If this is a non-default namespace and it is now empty, remove it.
+ if (NS != PragmaHandlers.get() && NS->IsEmpty()) {
PragmaHandlers->RemovePragmaHandler(NS);
delete NS;
}
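
Both the AddPragma simplification earlier in this file and the pruning above are plain map maintenance; a std::map stand-in for llvm::StringMap, just to spell out the operations:

    #include <map>
    #include <string>

    struct Handler { std::string Name; };

    struct PragmaNamespaceSketch {
      std::map<std::string, Handler *> Handlers;
      void add(Handler *H)    { Handlers[H->Name] = H; }   // create-or-assign in one step
      void remove(Handler *H) { Handlers.erase(H->Name); }
      bool empty() const      { return Handlers.empty(); } // caller prunes empty namespaces
    };
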
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index 4a11803aa954..b2a6d933a0f2 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -27,6 +27,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/CodeCompletionHandler.h"
@@ -61,19 +62,21 @@ Preprocessor::Preprocessor(IntrusiveRefCntPtr<PreprocessorOptions> PPOpts,
IdentifierInfoLookup *IILookup, bool OwnsHeaders,
TranslationUnitKind TUKind)
: PPOpts(PPOpts), Diags(&diags), LangOpts(opts), Target(nullptr),
- FileMgr(Headers.getFileMgr()), SourceMgr(SM), HeaderInfo(Headers),
+ FileMgr(Headers.getFileMgr()), SourceMgr(SM),
+ ScratchBuf(new ScratchBuffer(SourceMgr)),HeaderInfo(Headers),
TheModuleLoader(TheModuleLoader), ExternalSource(nullptr),
- Identifiers(opts, IILookup), IncrementalProcessing(false), TUKind(TUKind),
+ Identifiers(opts, IILookup),
+ PragmaHandlers(new PragmaNamespace(StringRef())),
+ IncrementalProcessing(false), TUKind(TUKind),
CodeComplete(nullptr), CodeCompletionFile(nullptr),
CodeCompletionOffset(0), LastTokenWasAt(false),
ModuleImportExpectsIdentifier(false), CodeCompletionReached(0),
- SkipMainFilePreamble(0, true), CurPPLexer(nullptr),
+ MainFileDir(nullptr), SkipMainFilePreamble(0, true), CurPPLexer(nullptr),
CurDirLookup(nullptr), CurLexerKind(CLK_Lexer), CurSubmodule(nullptr),
Callbacks(nullptr), MacroArgCache(nullptr), Record(nullptr),
- MIChainHead(nullptr), MICache(nullptr), DeserialMIChainHead(nullptr) {
+ MIChainHead(nullptr), DeserialMIChainHead(nullptr) {
OwnsHeaderSearch = OwnsHeaders;
- ScratchBuf = new ScratchBuffer(SourceMgr);
CounterValue = 0; // __COUNTER__ starts at 0.
// Clear stats.
@@ -111,7 +114,6 @@ Preprocessor::Preprocessor(IntrusiveRefCntPtr<PreprocessorOptions> PPOpts,
SetPoisonReason(Ident__VA_ARGS__,diag::ext_pp_bad_vaargs_use);
// Initialize the pragma handlers.
- PragmaHandlers = new PragmaNamespace(StringRef());
RegisterBuiltinPragmas();
// Initialize builtin macros like __LINE__ and friends.
@@ -141,35 +143,30 @@ Preprocessor::~Preprocessor() {
IncludeMacroStack.clear();
- // Free any macro definitions.
- for (MacroInfoChain *I = MIChainHead ; I ; I = I->Next)
- I->MI.Destroy();
+ // Destroy any macro definitions.
+ while (MacroInfoChain *I = MIChainHead) {
+ MIChainHead = I->Next;
+ I->~MacroInfoChain();
+ }
// Free any cached macro expanders.
// This populates MacroArgCache, so all TokenLexers need to be destroyed
// before the code below that frees up the MacroArgCache list.
- for (unsigned i = 0, e = NumCachedTokenLexers; i != e; ++i)
- delete TokenLexerCache[i];
+ std::fill(TokenLexerCache, TokenLexerCache + NumCachedTokenLexers, nullptr);
CurTokenLexer.reset();
- for (DeserializedMacroInfoChain *I = DeserialMIChainHead ; I ; I = I->Next)
- I->MI.Destroy();
+ while (DeserializedMacroInfoChain *I = DeserialMIChainHead) {
+ DeserialMIChainHead = I->Next;
+ I->~DeserializedMacroInfoChain();
+ }
// Free any cached MacroArgs.
for (MacroArgs *ArgList = MacroArgCache; ArgList;)
ArgList = ArgList->deallocate();
- // Release pragma information.
- delete PragmaHandlers;
-
- // Delete the scratch buffer info.
- delete ScratchBuf;
-
// Delete the header search info, if we own it.
if (OwnsHeaderSearch)
delete &HeaderInfo;
-
- delete Callbacks;
}
void Preprocessor::Initialize(const TargetInfo &Target) {
@@ -182,6 +179,24 @@ void Preprocessor::Initialize(const TargetInfo &Target) {
HeaderInfo.setTarget(Target);
}
+void Preprocessor::InitializeForModelFile() {
+ NumEnteredSourceFiles = 0;
+
+ // Reset pragmas
+ PragmaHandlersBackup = std::move(PragmaHandlers);
+ PragmaHandlers = llvm::make_unique<PragmaNamespace>(StringRef());
+ RegisterBuiltinPragmas();
+
+ // Reset PredefinesFileID
+ PredefinesFileID = FileID();
+}
+
+void Preprocessor::FinalizeForModelFile() {
+ NumEnteredSourceFiles = 1;
+
+ PragmaHandlers = std::move(PragmaHandlersBackup);
+}
+
void Preprocessor::setPTHManager(PTHManager* pm) {
PTH.reset(pm);
FileMgr.addStatCache(PTH->createStatCache());
@@ -370,21 +385,29 @@ bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
Position += CompleteColumn - 1;
- // Insert '\0' at the code-completion point.
- if (Position < Buffer->getBufferEnd()) {
- CodeCompletionFile = File;
- CodeCompletionOffset = Position - Buffer->getBufferStart();
-
- MemoryBuffer *NewBuffer =
- MemoryBuffer::getNewUninitMemBuffer(Buffer->getBufferSize() + 1,
- Buffer->getBufferIdentifier());
- char *NewBuf = const_cast<char*>(NewBuffer->getBufferStart());
- char *NewPos = std::copy(Buffer->getBufferStart(), Position, NewBuf);
- *NewPos = '\0';
- std::copy(Position, Buffer->getBufferEnd(), NewPos+1);
- SourceMgr.overrideFileContents(File, NewBuffer);
+ // If pointing inside the preamble, adjust the position at the beginning of
+ // the file after the preamble.
+ if (SkipMainFilePreamble.first &&
+ SourceMgr.getFileEntryForID(SourceMgr.getMainFileID()) == File) {
+ if (Position - Buffer->getBufferStart() < SkipMainFilePreamble.first)
+ Position = Buffer->getBufferStart() + SkipMainFilePreamble.first;
}
+ if (Position > Buffer->getBufferEnd())
+ Position = Buffer->getBufferEnd();
+
+ CodeCompletionFile = File;
+ CodeCompletionOffset = Position - Buffer->getBufferStart();
+
+ std::unique_ptr<MemoryBuffer> NewBuffer =
+ MemoryBuffer::getNewUninitMemBuffer(Buffer->getBufferSize() + 1,
+ Buffer->getBufferIdentifier());
+ char *NewBuf = const_cast<char*>(NewBuffer->getBufferStart());
+ char *NewPos = std::copy(Buffer->getBufferStart(), Position, NewBuf);
+ *NewPos = '\0';
+ std::copy(Position, Buffer->getBufferEnd(), NewPos+1);
+ SourceMgr.overrideFileContents(File, std::move(NewBuffer));
+
return false;
}
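
A minimal version of the buffer rewrite above: copy the contents and splice a NUL byte in at the completion offset (std::string stands in for MemoryBuffer):

    #include <cstddef>
    #include <string>

    static std::string withCompletionMarker(const std::string &Buf, std::size_t Offset) {
      std::string Out;
      Out.reserve(Buf.size() + 1);
      Out.append(Buf, 0, Offset);
      Out.push_back('\0');                       // the code-completion marker
      Out.append(Buf, Offset, std::string::npos);
      return Out;
    }
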
@@ -479,10 +502,10 @@ void Preprocessor::EnterMainSourceFile() {
}
// Preprocess Predefines to populate the initial preprocessor state.
- llvm::MemoryBuffer *SB =
+ std::unique_ptr<llvm::MemoryBuffer> SB =
llvm::MemoryBuffer::getMemBufferCopy(Predefines, "<built-in>");
assert(SB && "Cannot create predefined source buffer");
- FileID FID = SourceMgr.createFileID(SB);
+ FileID FID = SourceMgr.createFileID(std::move(SB));
assert(!FID.isInvalid() && "Could not create FileID for predefines?");
setPredefinesFileID(FID);
@@ -649,7 +672,8 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) {
// keyword when we're in a caching lexer, because caching lexers only get
// used in contexts where import declarations are disallowed.
if (LastTokenWasAt && II.isModulesImport() && !InMacroArgs &&
- !DisableMacroExpansion && getLangOpts().Modules &&
+ !DisableMacroExpansion &&
+ (getLangOpts().Modules || getLangOpts().DebuggerSupport) &&
CurLexerKind != CLK_CachingLexer) {
ModuleImportLoc = Identifier.getLocation();
ModuleImportPath.clear();
@@ -722,12 +746,14 @@ void Preprocessor::LexAfterModuleImport(Token &Result) {
}
// If we have a non-empty module path, load the named module.
- if (!ModuleImportPath.empty() && getLangOpts().Modules) {
- Module *Imported = TheModuleLoader.loadModule(ModuleImportLoc,
- ModuleImportPath,
- Module::MacrosVisible,
- /*IsIncludeDirective=*/false);
- if (Callbacks)
+ if (!ModuleImportPath.empty()) {
+ Module *Imported = nullptr;
+ if (getLangOpts().Modules)
+ Imported = TheModuleLoader.loadModule(ModuleImportLoc,
+ ModuleImportPath,
+ Module::MacrosVisible,
+ /*IsIncludeDirective=*/false);
+ if (Callbacks && (getLangOpts().Modules || getLangOpts().DebuggerSupport))
Callbacks->moduleImport(ModuleImportLoc, ModuleImportPath, Imported);
}
}
@@ -830,5 +856,5 @@ void Preprocessor::createPreprocessingRecord() {
return;
Record = new PreprocessingRecord(getSourceManager());
- addPPCallbacks(Record);
+ addPPCallbacks(std::unique_ptr<PPCallbacks>(Record));
}
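
The rewritten SetCodeCompletionPoint clamps the completion position to the preamble end and the buffer end, then builds a copy of the file one byte larger with a NUL sentinel at the completion offset. A minimal standard-library sketch of that buffer rewrite (names are illustrative, not the Preprocessor API):

#include <algorithm>
#include <cstddef>
#include <string>

// Return a copy of Src with a '\0' sentinel inserted at Offset; the offset is
// clamped so a stale column never runs past the end of the buffer.
std::string insertCompletionSentinel(const std::string &Src, std::size_t Offset) {
  Offset = std::min(Offset, Src.size());
  std::string Out;
  Out.reserve(Src.size() + 1);
  Out.append(Src, 0, Offset);   // bytes before the completion point
  Out.push_back('\0');          // sentinel the lexer stops at
  Out.append(Src, Offset, std::string::npos);
  return Out;
}
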
diff --git a/lib/Lex/ScratchBuffer.cpp b/lib/Lex/ScratchBuffer.cpp
index d7104b178860..3bac8891c646 100644
--- a/lib/Lex/ScratchBuffer.cpp
+++ b/lib/Lex/ScratchBuffer.cpp
@@ -64,11 +64,12 @@ void ScratchBuffer::AllocScratchBuffer(unsigned RequestLen) {
if (RequestLen < ScratchBufSize)
RequestLen = ScratchBufSize;
- llvm::MemoryBuffer *Buf =
- llvm::MemoryBuffer::getNewMemBuffer(RequestLen, "<scratch space>");
- FileID FID = SourceMgr.createFileID(Buf);
+ std::unique_ptr<llvm::MemoryBuffer> OwnBuf =
+ llvm::MemoryBuffer::getNewMemBuffer(RequestLen, "<scratch space>");
+ llvm::MemoryBuffer &Buf = *OwnBuf;
+ FileID FID = SourceMgr.createFileID(std::move(OwnBuf));
BufferStartLoc = SourceMgr.getLocForStartOfFile(FID);
- CurBuffer = const_cast<char*>(Buf->getBufferStart());
+ CurBuffer = const_cast<char*>(Buf.getBufferStart());
BytesUsed = 1;
CurBuffer[0] = '0'; // Start out with a \0 for cleanliness.
}
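
The ScratchBuffer change shows the pattern needed once createFileID takes ownership via std::unique_ptr: any reference you still need must be captured before the std::move, because the moved-from pointer is null afterwards. A small standard-library sketch of the same shape (Registry and Buffer are invented stand-ins):

#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Buffer { std::string Data; };

// A sink that takes ownership, standing in for SourceManager::createFileID().
struct Registry {
  std::vector<std::unique_ptr<Buffer>> Owned;
  void take(std::unique_ptr<Buffer> B) { Owned.push_back(std::move(B)); }
};

void example(Registry &R) {
  auto Own = std::make_unique<Buffer>();
  Buffer &Buf = *Own;        // grab a reference *before* ownership moves
  R.take(std::move(Own));    // Own is now null; Buf is still valid
  Buf.Data = "still usable"; // the object lives on inside the registry
}
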
diff --git a/lib/Lex/TokenConcatenation.cpp b/lib/Lex/TokenConcatenation.cpp
index 0a66bba91fcd..08327496ab8a 100644
--- a/lib/Lex/TokenConcatenation.cpp
+++ b/lib/Lex/TokenConcatenation.cpp
@@ -99,6 +99,10 @@ TokenConcatenation::TokenConcatenation(Preprocessor &pp) : PP(pp) {
TokenInfo[tok::utf32_char_constant ] |= aci_custom;
}
+ // These tokens have custom code in C++1z mode.
+ if (PP.getLangOpts().CPlusPlus1z)
+ TokenInfo[tok::utf8_char_constant] |= aci_custom;
+
// These tokens change behavior if followed by an '='.
TokenInfo[tok::amp ] |= aci_avoid_equal; // &=
TokenInfo[tok::plus ] |= aci_avoid_equal; // +=
@@ -163,8 +167,8 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
return false;
tok::TokenKind PrevKind = PrevTok.getKind();
- if (PrevTok.getIdentifierInfo()) // Language keyword or named operator.
- PrevKind = tok::identifier;
+ if (!PrevTok.isAnnotation() && PrevTok.getIdentifierInfo())
+ PrevKind = tok::identifier; // Language keyword or named operator.
// Look up information on when we should avoid concatenation with prevtok.
unsigned ConcatInfo = TokenInfo[PrevKind];
@@ -178,6 +182,14 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
return true;
ConcatInfo &= ~aci_avoid_equal;
}
+ if (Tok.isAnnotation()) {
+ // Module annotations can show up when generated automatically for includes.
+ assert((Tok.is(tok::annot_module_include) ||
+ Tok.is(tok::annot_module_begin) ||
+ Tok.is(tok::annot_module_end)) &&
+ "unexpected annotation in AvoidConcat");
+ ConcatInfo = 0;
+ }
if (ConcatInfo == 0) return false;
@@ -205,6 +217,7 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
case tok::utf32_string_literal:
case tok::char_constant:
case tok::wide_char_constant:
+ case tok::utf8_char_constant:
case tok::utf16_char_constant:
case tok::utf32_char_constant:
if (!PP.getLangOpts().CPlusPlus11)
@@ -228,7 +241,8 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
if (Tok.getIdentifierInfo() || Tok.is(tok::wide_string_literal) ||
Tok.is(tok::utf8_string_literal) || Tok.is(tok::utf16_string_literal) ||
Tok.is(tok::utf32_string_literal) || Tok.is(tok::wide_char_constant) ||
- Tok.is(tok::utf16_char_constant) || Tok.is(tok::utf32_char_constant))
+ Tok.is(tok::utf8_char_constant) || Tok.is(tok::utf16_char_constant) ||
+ Tok.is(tok::utf32_char_constant))
return true;
// If this isn't identifier + string, we're done.
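
The new utf8_char_constant entries keep the preprocessed-output printer from gluing a prefix-like identifier onto a following character literal, which in C++17 would re-lex as a single UTF-8 character literal (u8'x'). A toy standalone check for just that identifier/char-literal case (illustrative names, not the real AvoidConcat logic):

#include <string>

enum class TokKind { Identifier, CharConstant, Other };

// Returns true if a space must be kept between Prev and Next so that the
// printed token stream re-lexes to the same tokens. In C++17 an identifier
// such as "u8" glued onto 'x' would form the single token u8'x'.
bool mustSeparate(TokKind Prev, const std::string &PrevSpelling, TokKind Next,
                  bool CPlusPlus17) {
  if (Prev != TokKind::Identifier || Next != TokKind::CharConstant)
    return false;
  return PrevSpelling == "L" || PrevSpelling == "u" || PrevSpelling == "U" ||
         (CPlusPlus17 && PrevSpelling == "u8");
}
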
diff --git a/lib/Lex/TokenLexer.cpp b/lib/Lex/TokenLexer.cpp
index 9d03e8d30b8b..5f4705eff697 100644
--- a/lib/Lex/TokenLexer.cpp
+++ b/lib/Lex/TokenLexer.cpp
@@ -206,6 +206,7 @@ void TokenLexer::ExpandFunctionArguments() {
ExpansionLocStart,
ExpansionLocEnd);
}
+ Res.setFlag(Token::StringifiedInMacro);
// The stringified/charified string leading space flag gets set to match
// the #/#@ operator.
@@ -405,6 +406,14 @@ void TokenLexer::ExpandFunctionArguments() {
}
}
+/// \brief Checks if two tokens form a wide string literal.
+static bool isWideStringLiteralFromMacro(const Token &FirstTok,
+ const Token &SecondTok) {
+ return FirstTok.is(tok::identifier) &&
+ FirstTok.getIdentifierInfo()->isStr("L") && SecondTok.isLiteral() &&
+ SecondTok.stringifiedInMacro();
+}
+
/// Lex - Lex and return a token from this macro stream.
///
bool TokenLexer::Lex(Token &Tok) {
@@ -435,7 +444,13 @@ bool TokenLexer::Lex(Token &Tok) {
// If this token is followed by a token paste (##) operator, paste the tokens!
// Note that ## is a normal token when not expanding a macro.
- if (!isAtEnd() && Tokens[CurToken].is(tok::hashhash) && Macro) {
+ if (!isAtEnd() && Macro &&
+ (Tokens[CurToken].is(tok::hashhash) ||
+ // Special processing of L#x macros in -fms-compatibility mode.
+ // The Microsoft compiler is able to form a wide string literal from
+ // the 'L#macro_arg' construct in a function-like macro.
+ (PP.getLangOpts().MSVCCompat &&
+ isWideStringLiteralFromMacro(Tok, Tokens[CurToken])))) {
// When handling the microsoft /##/ extension, the final token is
// returned by PasteTokens, not the pasted token.
if (PasteTokens(Tok))
@@ -511,9 +526,10 @@ bool TokenLexer::PasteTokens(Token &Tok) {
SourceLocation StartLoc = Tok.getLocation();
SourceLocation PasteOpLoc;
do {
- // Consume the ## operator.
+ // Consume the ## operator if any.
PasteOpLoc = Tokens[CurToken].getLocation();
- ++CurToken;
+ if (Tokens[CurToken].is(tok::hashhash))
+ ++CurToken;
assert(!isAtEnd() && "No token on the RHS of a paste operator!");
// Get the RHS token.
@@ -531,12 +547,13 @@ bool TokenLexer::PasteTokens(Token &Tok) {
memcpy(&Buffer[0], BufPtr, LHSLen);
if (Invalid)
return true;
-
- BufPtr = &Buffer[LHSLen];
+
+ BufPtr = Buffer.data() + LHSLen;
unsigned RHSLen = PP.getSpelling(RHS, BufPtr, &Invalid);
if (Invalid)
return true;
- if (BufPtr != &Buffer[LHSLen]) // Really, we want the chars in Buffer!
+ if (RHSLen && BufPtr != &Buffer[LHSLen])
+ // Really, we want the chars in Buffer!
memcpy(&Buffer[LHSLen], BufPtr, RHSLen);
// Trim excess space.
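
isWideStringLiteralFromMacro implements an MSVC quirk: inside a function-like macro, an 'L' token directly followed by a stringified argument is pasted into a wide string literal. A hypothetical translation unit relying on it (accepted by clang only under -fms-compatibility; standard C++ leaves the 'L' and the string as separate tokens and rejects the result):

#define WIDEN(x) L#x                 // MSVC pastes 'L' and "x" into L"x"

const wchar_t *Msg = WIDEN(hello);   // expands to L"hello" under -fms-compatibility
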
diff --git a/lib/Lex/UnicodeCharSets.h b/lib/Lex/UnicodeCharSets.h
index 12b24564bfd3..116d553d2040 100644
--- a/lib/Lex/UnicodeCharSets.h
+++ b/lib/Lex/UnicodeCharSets.h
@@ -6,8 +6,8 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_LEX_UNICODECHARSETS_H
-#define CLANG_LEX_UNICODECHARSETS_H
+#ifndef LLVM_CLANG_LIB_LEX_UNICODECHARSETS_H
+#define LLVM_CLANG_LIB_LEX_UNICODECHARSETS_H
#include "llvm/Support/UnicodeCharRanges.h"
diff --git a/lib/Parse/ParseAST.cpp b/lib/Parse/ParseAST.cpp
index 0f5a1b3ffbd6..6727afc1dd6c 100644
--- a/lib/Parse/ParseAST.cpp
+++ b/lib/Parse/ParseAST.cpp
@@ -14,7 +14,6 @@
#include "clang/Parse/ParseAST.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Stmt.h"
#include "clang/Parse/ParseDiagnostic.h"
@@ -146,10 +145,8 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
}
// Process any TopLevelDecls generated by #pragma weak.
- for (SmallVectorImpl<Decl *>::iterator
- I = S.WeakTopLevelDecls().begin(),
- E = S.WeakTopLevelDecls().end(); I != E; ++I)
- Consumer->HandleTopLevelDecl(DeclGroupRef(*I));
+ for (Decl *D : S.WeakTopLevelDecls())
+ Consumer->HandleTopLevelDecl(DeclGroupRef(D));
Consumer->HandleTranslationUnit(S.getASTContext());
diff --git a/lib/Parse/ParseCXXInlineMethods.cpp b/lib/Parse/ParseCXXInlineMethods.cpp
index 30a9120a5106..59b491abfd15 100644
--- a/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/lib/Parse/ParseCXXInlineMethods.cpp
@@ -218,6 +218,7 @@ void Parser::ParseCXXNonStaticMemberInitializer(Decl *VarD) {
Eof.startToken();
Eof.setKind(tok::eof);
Eof.setLocation(Tok.getLocation());
+ Eof.setEofData(VarD);
Toks.push_back(Eof);
}
@@ -308,10 +309,17 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
// Introduce the parameter into scope.
Actions.ActOnDelayedCXXMethodParameter(getCurScope(),
LM.DefaultArgs[I].Param);
-
if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) {
- // Save the current token position.
- SourceLocation origLoc = Tok.getLocation();
+ // Mark the end of the default argument so that we know where to stop when
+ // we parse it later on.
+ Token LastDefaultArgToken = Toks->back();
+ Token DefArgEnd;
+ DefArgEnd.startToken();
+ DefArgEnd.setKind(tok::eof);
+ DefArgEnd.setLocation(LastDefaultArgToken.getLocation().getLocWithOffset(
+ LastDefaultArgToken.getLength()));
+ DefArgEnd.setEofData(LM.DefaultArgs[I].Param);
+ Toks->push_back(DefArgEnd);
// Parse the default argument from its saved token stream.
Toks->push_back(Tok); // So that the current token doesn't get lost
@@ -336,11 +344,13 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
DefArgResult = ParseBraceInitializer();
} else
DefArgResult = ParseAssignmentExpression();
- if (DefArgResult.isInvalid())
+ DefArgResult = Actions.CorrectDelayedTyposInExpr(DefArgResult);
+ if (DefArgResult.isInvalid()) {
Actions.ActOnParamDefaultArgumentError(LM.DefaultArgs[I].Param,
EqualLoc);
- else {
- if (!TryConsumeToken(tok::cxx_defaultarg_end)) {
+ } else {
+ if (Tok.isNot(tok::eof) ||
+ Tok.getEofData() != LM.DefaultArgs[I].Param) {
// The last two tokens are the terminator and the saved value of
// Tok; the last token in the default argument is the one before
// those.
@@ -353,12 +363,12 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
DefArgResult.get());
}
- assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
- Tok.getLocation()) &&
- "ParseAssignmentExpression went over the default arg tokens!");
// There could be leftover tokens (e.g. because of an error).
- // Skip through until we reach the original token position.
- while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
+ // Skip through until we reach the 'end of default argument' token.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == LM.DefaultArgs[I].Param)
ConsumeAnyToken();
delete Toks;
@@ -366,6 +376,80 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
}
}
+ // Parse a delayed exception-specification, if there is one.
+ if (CachedTokens *Toks = LM.ExceptionSpecTokens) {
+ // Add the 'stop' token.
+ Token LastExceptionSpecToken = Toks->back();
+ Token ExceptionSpecEnd;
+ ExceptionSpecEnd.startToken();
+ ExceptionSpecEnd.setKind(tok::eof);
+ ExceptionSpecEnd.setLocation(
+ LastExceptionSpecToken.getLocation().getLocWithOffset(
+ LastExceptionSpecToken.getLength()));
+ ExceptionSpecEnd.setEofData(LM.Method);
+ Toks->push_back(ExceptionSpecEnd);
+
+ // Parse the exception-specification from its saved token stream.
+ Toks->push_back(Tok); // So that the current token doesn't get lost
+ PP.EnterTokenStream(&Toks->front(), Toks->size(), true, false);
+
+ // Consume the previously-pushed token.
+ ConsumeAnyToken();
+
+ // C++11 [expr.prim.general]p3:
+ // If a declaration declares a member function or member function
+ // template of a class X, the expression this is a prvalue of type
+ // "pointer to cv-qualifier-seq X" between the optional cv-qualifer-seq
+ // and the end of the function-definition, member-declarator, or
+ // declarator.
+ CXXMethodDecl *Method;
+ if (FunctionTemplateDecl *FunTmpl
+ = dyn_cast<FunctionTemplateDecl>(LM.Method))
+ Method = cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
+ else
+ Method = cast<CXXMethodDecl>(LM.Method);
+
+ Sema::CXXThisScopeRAII ThisScope(Actions, Method->getParent(),
+ Method->getTypeQualifiers(),
+ getLangOpts().CPlusPlus11);
+
+ // Parse the exception-specification.
+ SourceRange SpecificationRange;
+ SmallVector<ParsedType, 4> DynamicExceptions;
+ SmallVector<SourceRange, 4> DynamicExceptionRanges;
+ ExprResult NoexceptExpr;
+ CachedTokens *ExceptionSpecTokens;
+
+ ExceptionSpecificationType EST
+ = tryParseExceptionSpecification(/*Delayed=*/false, SpecificationRange,
+ DynamicExceptions,
+ DynamicExceptionRanges, NoexceptExpr,
+ ExceptionSpecTokens);
+
+ if (Tok.isNot(tok::eof) || Tok.getEofData() != LM.Method)
+ Diag(Tok.getLocation(), diag::err_except_spec_unparsed);
+
+ // Attach the exception-specification to the method.
+ Actions.actOnDelayedExceptionSpecification(LM.Method, EST,
+ SpecificationRange,
+ DynamicExceptions,
+ DynamicExceptionRanges,
+ NoexceptExpr.isUsable()?
+ NoexceptExpr.get() : nullptr);
+
+ // There could be leftover tokens (e.g. because of an error).
+ // Skip through until we reach the artificial EOF token.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ // Clean up the remaining EOF token.
+ if (Tok.is(tok::eof) && Tok.getEofData() == LM.Method)
+ ConsumeAnyToken();
+
+ delete Toks;
+ LM.ExceptionSpecTokens = nullptr;
+ }
+
PrototypeScope.Exit();
// Finish the delayed C++ method declaration.
@@ -400,10 +484,16 @@ void Parser::ParseLexedMethodDef(LexedMethod &LM) {
Actions.ActOnReenterTemplateScope(getCurScope(), LM.D);
++CurTemplateDepthTracker;
}
- // Save the current token position.
- SourceLocation origLoc = Tok.getLocation();
assert(!LM.Toks.empty() && "Empty body!");
+ Token LastBodyToken = LM.Toks.back();
+ Token BodyEnd;
+ BodyEnd.startToken();
+ BodyEnd.setKind(tok::eof);
+ BodyEnd.setLocation(
+ LastBodyToken.getLocation().getLocWithOffset(LastBodyToken.getLength()));
+ BodyEnd.setEofData(LM.D);
+ LM.Toks.push_back(BodyEnd);
// Append the current token at the end of the new token stream so that it
// doesn't get lost.
LM.Toks.push_back(Tok);
@@ -421,12 +511,11 @@ void Parser::ParseLexedMethodDef(LexedMethod &LM) {
if (Tok.is(tok::kw_try)) {
ParseFunctionTryBlock(LM.D, FnScope);
- assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
- Tok.getLocation()) &&
- "ParseFunctionTryBlock went over the cached tokens!");
- // There could be leftover tokens (e.g. because of an error).
- // Skip through until we reach the original token position.
- while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
+
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == LM.D)
ConsumeAnyToken();
return;
}
@@ -437,7 +526,11 @@ void Parser::ParseLexedMethodDef(LexedMethod &LM) {
if (!Tok.is(tok::l_brace)) {
FnScope.Exit();
Actions.ActOnFinishFunctionBody(LM.D, nullptr);
- while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
+
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == LM.D)
ConsumeAnyToken();
return;
}
@@ -457,17 +550,11 @@ void Parser::ParseLexedMethodDef(LexedMethod &LM) {
if (LM.D)
LM.D->getAsFunction()->setLateTemplateParsed(false);
- if (Tok.getLocation() != origLoc) {
- // Due to parsing error, we either went over the cached tokens or
- // there are still cached tokens left. If it's the latter case skip the
- // leftover tokens.
- // Since this is an uncommon situation that should be avoided, use the
- // expensive isBeforeInTranslationUnit call.
- if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(),
- origLoc))
- while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
- ConsumeAnyToken();
- }
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == LM.D)
+ ConsumeAnyToken();
if (CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(LM.D))
Actions.ActOnFinishInlineMethodDef(MD);
@@ -540,17 +627,21 @@ void Parser::ParseLexedMemberInitializer(LateParsedMemberInitializer &MI) {
// The next token should be our artificial terminating EOF token.
if (Tok.isNot(tok::eof)) {
- SourceLocation EndLoc = PP.getLocForEndOfToken(PrevTokLocation);
- if (!EndLoc.isValid())
- EndLoc = Tok.getLocation();
- // No fixit; we can't recover as if there were a semicolon here.
- Diag(EndLoc, diag::err_expected_semi_decl_list);
+ if (!Init.isInvalid()) {
+ SourceLocation EndLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ if (!EndLoc.isValid())
+ EndLoc = Tok.getLocation();
+ // No fixit; we can't recover as if there were a semicolon here.
+ Diag(EndLoc, diag::err_expected_semi_decl_list);
+ }
// Consume tokens until we hit the artificial EOF.
while (Tok.isNot(tok::eof))
ConsumeAnyToken();
}
- ConsumeAnyToken();
+ // Make sure this is *our* artificial EOF token.
+ if (Tok.getEofData() == MI.Field)
+ ConsumeAnyToken();
}
/// ConsumeAndStoreUntil - Consume and store the token at the passed token
@@ -891,11 +982,13 @@ private:
/// ConsumeAndStoreInitializer - Consume and store the token at the passed token
/// container until the end of the current initializer expression (either a
/// default argument or an in-class initializer for a non-static data member).
-/// The final token is not consumed.
+///
+/// Returns \c true if we reached the end of something initializer-shaped,
+/// \c false if we bailed out.
bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
CachedInitKind CIK) {
// We always want this function to consume at least one token if not at EOF.
- bool IsFirstTokenConsumed = true;
+ bool IsFirstToken = true;
// Number of possible unclosed <s we've seen so far. These might be templates,
// and might not, but if there were none of them (or we know for sure that
@@ -942,7 +1035,7 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
case CIK_DefaultArgument:
bool InvalidAsDeclaration = false;
Result = TryParseParameterDeclarationClause(
- &InvalidAsDeclaration, /*VersusTemplateArgument*/true);
+ &InvalidAsDeclaration, /*VersusTemplateArgument=*/true);
// If this is an expression or a declaration with a missing
// 'typename', assume it's not a declaration.
if (Result == TPResult::Ambiguous && InvalidAsDeclaration)
@@ -1014,6 +1107,7 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
Toks.push_back(Tok);
ConsumeToken();
if (Tok.is(tok::less)) {
+ ++AngleCount;
++KnownTemplateCount;
Toks.push_back(Tok);
ConsumeToken();
@@ -1063,21 +1157,28 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
// Since the user wasn't looking for this token (if they were, it would
// already be handled), this isn't balanced. If there is a LHS token at a
// higher level, we will assume that this matches the unbalanced token
- // and return it. Otherwise, this is a spurious RHS token, which we skip.
+ // and return it. Otherwise, this is a spurious RHS token, which we
+ // consume and pass on to downstream code to diagnose.
case tok::r_paren:
if (CIK == CIK_DefaultArgument)
return true; // End of the default argument.
- if (ParenCount && !IsFirstTokenConsumed)
- return false; // Matches something.
- goto consume_token;
+ if (ParenCount && !IsFirstToken)
+ return false;
+ Toks.push_back(Tok);
+ ConsumeParen();
+ continue;
case tok::r_square:
- if (BracketCount && !IsFirstTokenConsumed)
- return false; // Matches something.
- goto consume_token;
+ if (BracketCount && !IsFirstToken)
+ return false;
+ Toks.push_back(Tok);
+ ConsumeBracket();
+ continue;
case tok::r_brace:
- if (BraceCount && !IsFirstTokenConsumed)
- return false; // Matches something.
- goto consume_token;
+ if (BraceCount && !IsFirstToken)
+ return false;
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ continue;
case tok::code_completion:
Toks.push_back(Tok);
@@ -1102,6 +1203,6 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
ConsumeToken();
break;
}
- IsFirstTokenConsumed = false;
+ IsFirstToken = false;
}
}
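
The recurring rewrite in this file replaces source-location bookkeeping with an artificial eof token whose payload (setEofData) names the construct being re-parsed; cleanup then just consumes tokens until it finds its own sentinel. A stripped-down sketch of that pattern with invented Token and stream helpers:

#include <deque>
#include <string>

struct Token {
  std::string Kind;               // "eof" marks a sentinel
  const void *EofData = nullptr;  // identifies who pushed the sentinel
};

// Append a sentinel owned by `Owner`, then later drain until we hit it.
void pushSentinel(std::deque<Token> &Stream, const void *Owner) {
  Stream.push_back(Token{"eof", Owner});
}

void drainUntilSentinel(std::deque<Token> &Stream, const void *Owner) {
  // Skip leftover tokens (e.g. after an error) up to the sentinel...
  while (!Stream.empty() && Stream.front().Kind != "eof")
    Stream.pop_front();
  // ...and consume the sentinel only if it is really ours.
  if (!Stream.empty() && Stream.front().EofData == Owner)
    Stream.pop_front();
}
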
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index 62d43768bff4..4d05e16491b0 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -143,10 +143,12 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
continue;
// Expect an identifier or declaration specifier (const, int, etc.)
- if (Tok.isNot(tok::identifier) && !isDeclarationSpecifier())
+ if (Tok.isAnnotation())
break;
-
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ if (!AttrName)
+ break;
+
SourceLocation AttrNameLoc = ConsumeToken();
if (Tok.isNot(tok::l_paren)) {
@@ -302,7 +304,8 @@ unsigned Parser::ParseAttributeArgsCommon(
Unevaluated.reset(
new EnterExpressionEvaluationContext(Actions, Sema::Unevaluated));
- ExprResult ArgExpr(ParseAssignmentExpression());
+ ExprResult ArgExpr(
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
if (ArgExpr.isInvalid()) {
SkipUntil(tok::r_paren, StopAtSemi);
return 0;
@@ -588,15 +591,64 @@ void Parser::ParseMicrosoftDeclSpec(ParsedAttributes &Attrs) {
void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
// Treat these like attributes
- while (Tok.is(tok::kw___fastcall) || Tok.is(tok::kw___stdcall) ||
- Tok.is(tok::kw___thiscall) || Tok.is(tok::kw___cdecl) ||
- Tok.is(tok::kw___ptr64) || Tok.is(tok::kw___w64) ||
- Tok.is(tok::kw___ptr32) || Tok.is(tok::kw___unaligned) ||
- Tok.is(tok::kw___sptr) || Tok.is(tok::kw___uptr)) {
- IdentifierInfo *AttrName = Tok.getIdentifierInfo();
- SourceLocation AttrNameLoc = ConsumeToken();
- attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- AttributeList::AS_Keyword);
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::kw___fastcall:
+ case tok::kw___stdcall:
+ case tok::kw___thiscall:
+ case tok::kw___cdecl:
+ case tok::kw___vectorcall:
+ case tok::kw___ptr64:
+ case tok::kw___w64:
+ case tok::kw___ptr32:
+ case tok::kw___unaligned:
+ case tok::kw___sptr:
+ case tok::kw___uptr: {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ AttributeList::AS_Keyword);
+ break;
+ }
+ default:
+ return;
+ }
+ }
+}
+
+void Parser::DiagnoseAndSkipExtendedMicrosoftTypeAttributes() {
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = SkipExtendedMicrosoftTypeAttributes();
+
+ if (EndLoc.isValid()) {
+ SourceRange Range(StartLoc, EndLoc);
+ Diag(StartLoc, diag::warn_microsoft_qualifiers_ignored) << Range;
+ }
+}
+
+SourceLocation Parser::SkipExtendedMicrosoftTypeAttributes() {
+ SourceLocation EndLoc;
+
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw___fastcall:
+ case tok::kw___stdcall:
+ case tok::kw___thiscall:
+ case tok::kw___cdecl:
+ case tok::kw___vectorcall:
+ case tok::kw___ptr32:
+ case tok::kw___ptr64:
+ case tok::kw___w64:
+ case tok::kw___unaligned:
+ case tok::kw___sptr:
+ case tok::kw___uptr:
+ EndLoc = ConsumeToken();
+ break;
+ default:
+ return EndLoc;
+ }
}
}
@@ -627,6 +679,10 @@ void Parser::ParseOpenCLQualifiers(ParsedAttributes &Attrs) {
AttributeList::AS_Keyword);
}
+static bool VersionNumberSeparator(const char Separator) {
+ return (Separator == '.' || Separator == '_');
+}
+
/// \brief Parse a version number.
///
/// version:
@@ -684,7 +740,9 @@ VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
return VersionTuple(Major);
}
- if (ThisTokBegin[AfterMajor] != '.' || (AfterMajor + 1 == ActualLength)) {
+ const char AfterMajorSeparator = ThisTokBegin[AfterMajor];
+ if (!VersionNumberSeparator(AfterMajorSeparator)
+ || (AfterMajor + 1 == ActualLength)) {
Diag(Tok, diag::err_expected_version);
SkipUntil(tok::comma, tok::r_paren,
StopAtSemi | StopBeforeMatch | StopAtCodeCompletion);
@@ -708,16 +766,21 @@ VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
return VersionTuple();
}
- return VersionTuple(Major, Minor);
+ return VersionTuple(Major, Minor, (AfterMajorSeparator == '_'));
}
- // If what follows is not a '.', we have a problem.
- if (ThisTokBegin[AfterMinor] != '.') {
+ const char AfterMinorSeparator = ThisTokBegin[AfterMinor];
+ // If what follows is not a '.' or '_', we have a problem.
+ if (!VersionNumberSeparator(AfterMinorSeparator)) {
Diag(Tok, diag::err_expected_version);
SkipUntil(tok::comma, tok::r_paren,
StopAtSemi | StopBeforeMatch | StopAtCodeCompletion);
return VersionTuple();
}
+
+ // Warn if the separators, whether '.' or '_', do not match.
+ if (AfterMajorSeparator != AfterMinorSeparator)
+ Diag(Tok, diag::warn_expected_consistent_version_separator);
// Parse the subminor version.
unsigned AfterSubminor = AfterMinor + 1;
@@ -734,7 +797,7 @@ VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
return VersionTuple();
}
ConsumeToken();
- return VersionTuple(Major, Minor, Subminor);
+ return VersionTuple(Major, Minor, Subminor, (AfterMajorSeparator == '_'));
}
/// \brief Parse the contents of the "availability" attribute.
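
ParseVersionTuple now accepts '_' as well as '.' between version components, records which separator introduced the minor version, and warns when the separators are mixed. A hedged sketch of the same separator handling over a plain string, away from the token machinery (parseVersion is an invented helper; the real parser only warns on mixed separators instead of rejecting):

#include <cctype>
#include <cstddef>
#include <optional>
#include <string>

struct Version { unsigned Major = 0, Minor = 0, Sub = 0; bool Underscores = false; };

static bool isVersionSeparator(char C) { return C == '.' || C == '_'; }

// Parses "10.9.2" or "10_9_2"; returns nullopt on malformed or mixed input.
std::optional<Version> parseVersion(const std::string &S) {
  Version V;
  std::size_t I = 0;
  auto number = [&](unsigned &Out) {
    if (I >= S.size() || !std::isdigit((unsigned char)S[I])) return false;
    for (Out = 0; I < S.size() && std::isdigit((unsigned char)S[I]); ++I)
      Out = Out * 10 + (S[I] - '0');
    return true;
  };
  if (!number(V.Major)) return std::nullopt;
  if (I == S.size()) return V;
  char Sep = S[I];
  if (!isVersionSeparator(Sep)) return std::nullopt;
  V.Underscores = (Sep == '_');
  ++I;
  if (!number(V.Minor)) return std::nullopt;
  if (I == S.size()) return V;
  if (S[I] != Sep) return std::nullopt;  // mixed separators
  ++I;
  if (!number(V.Sub) || I != S.size()) return std::nullopt;
  return V;
}
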
@@ -846,6 +909,19 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
break;
}
+ // Special handling of 'NA' only when applied to introduced or
+ // deprecated.
+ if ((Keyword == Ident_introduced || Keyword == Ident_deprecated) &&
+ Tok.is(tok::identifier)) {
+ IdentifierInfo *NA = Tok.getIdentifierInfo();
+ if (NA->getName() == "NA") {
+ ConsumeToken();
+ if (Keyword == Ident_introduced)
+ UnavailableLoc = KeywordLoc;
+ continue;
+ }
+ }
+
SourceRange VersionRange;
VersionTuple Version = ParseVersionTuple(VersionRange);
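
The 'NA' special case lets an availability attribute say "not available" for introduced (and be effectively ignored for deprecated) instead of requiring a version number; for introduced it is folded into the unavailable state. A hypothetical declaration exercising that spelling (platforms and versions are illustrative):

// 'introduced=NA' parses and marks the declaration unavailable on that
// platform, while the other availability attribute is unaffected.
void legacyCall()
    __attribute__((availability(macosx, introduced=10.4, deprecated=10.6)))
    __attribute__((availability(ios, introduced=NA)));
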
@@ -1071,8 +1147,14 @@ void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
/// to the Attribute list for the decl.
void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition) {
- // Save the current token position.
- SourceLocation OrigLoc = Tok.getLocation();
+ // Create a fake EOF so that attribute parsing won't go off the end of the
+ // attribute.
+ Token AttrEnd;
+ AttrEnd.startToken();
+ AttrEnd.setKind(tok::eof);
+ AttrEnd.setLocation(Tok.getLocation());
+ AttrEnd.setEofData(LA.Toks.data());
+ LA.Toks.push_back(AttrEnd);
// Append the current token at the end of the new token stream so that it
// doesn't get lost.
@@ -1137,16 +1219,13 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
for (unsigned i = 0, ni = LA.Decls.size(); i < ni; ++i)
Actions.ActOnFinishDelayedAttribute(getCurScope(), LA.Decls[i], Attrs);
- if (Tok.getLocation() != OrigLoc) {
- // Due to a parsing error, we either went over the cached tokens or
- // there are still cached tokens left, so we skip the leftover tokens.
- // Since this is an uncommon situation that should be avoided, use the
- // expensive isBeforeInTranslationUnit call.
- if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(),
- OrigLoc))
- while (Tok.getLocation() != OrigLoc && Tok.isNot(tok::eof))
- ConsumeAnyToken();
- }
+ // Due to a parsing error, we either went over the cached tokens or
+ // there are still cached tokens left, so we skip the leftover tokens.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == AttrEnd.getEofData())
+ ConsumeAnyToken();
}
void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
@@ -1297,8 +1376,7 @@ void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &attrs) {
/// [C++11/C11] static_assert-declaration
/// others... [FIXME]
///
-Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
- unsigned Context,
+Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs) {
ParenBraceBracketBalancer BalancerRAIIObj(*this);
@@ -1322,7 +1400,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
SingleDecl = ParseNamespace(Context, DeclEnd, InlineLoc);
break;
}
- return ParseSimpleDeclaration(Stmts, Context, DeclEnd, attrs,
+ return ParseSimpleDeclaration(Context, DeclEnd, attrs,
true);
case tok::kw_namespace:
ProhibitAttributes(attrs);
@@ -1338,7 +1416,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
SingleDecl = ParseStaticAssertDeclaration(DeclEnd);
break;
default:
- return ParseSimpleDeclaration(Stmts, Context, DeclEnd, attrs, true);
+ return ParseSimpleDeclaration(Context, DeclEnd, attrs, true);
}
// This routine returns a DeclGroup, if the thing we parsed only contains a
@@ -1364,7 +1442,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
/// of a simple-declaration. If we find that we are, we also parse the
/// for-range-initializer, and place it here.
Parser::DeclGroupPtrTy
-Parser::ParseSimpleDeclaration(StmtVector &Stmts, unsigned Context,
+Parser::ParseSimpleDeclaration(unsigned Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &Attrs,
bool RequireSemi, ForRangeInit *FRI) {
@@ -1393,7 +1471,7 @@ Parser::ParseSimpleDeclaration(StmtVector &Stmts, unsigned Context,
}
DS.takeAttributesFrom(Attrs);
- return ParseDeclGroup(DS, Context, /*FunctionDefs=*/ false, &DeclEnd, FRI);
+ return ParseDeclGroup(DS, Context, &DeclEnd, FRI);
}
/// Returns true if this might be the start of a declarator, or a common typo
@@ -1548,7 +1626,6 @@ void Parser::SkipMalformedDecl() {
/// result.
Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
unsigned Context,
- bool AllowFunctionDefinitions,
SourceLocation *DeclEnd,
ForRangeInit *FRI) {
// Parse the first declarator.
@@ -1565,9 +1642,30 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// appropriate function scope after the function Decl has been constructed.
// These will be parsed in ParseFunctionDefinition or ParseLexedAttrList.
LateParsedAttrList LateParsedAttrs(true);
- if (D.isFunctionDeclarator())
+ if (D.isFunctionDeclarator()) {
MaybeParseGNUAttributes(D, &LateParsedAttrs);
+ // The _Noreturn keyword can't appear here, unlike the GNU noreturn
+ // attribute. If we find the keyword here, tell the user to put it
+ // at the start instead.
+ if (Tok.is(tok::kw__Noreturn)) {
+ SourceLocation Loc = ConsumeToken();
+ const char *PrevSpec;
+ unsigned DiagID;
+
+ // We can offer a fixit if it's valid to mark this function as _Noreturn
+ // and we don't have any other declarators in this declaration.
+ bool Fixit = !DS.setFunctionSpecNoreturn(Loc, PrevSpec, DiagID);
+ MaybeParseGNUAttributes(D, &LateParsedAttrs);
+ Fixit &= Tok.is(tok::semi) || Tok.is(tok::l_brace) || Tok.is(tok::kw_try);
+
+ Diag(Loc, diag::err_c11_noreturn_misplaced)
+ << (Fixit ? FixItHint::CreateRemoval(Loc) : FixItHint())
+ << (Fixit ? FixItHint::CreateInsertion(D.getLocStart(), "_Noreturn ")
+ : FixItHint());
+ }
+ }
+
// Check to see if we have a function *definition* which must have a body.
if (D.isFunctionDeclarator() &&
// Look at the next token to make sure that this isn't a function
@@ -1575,7 +1673,10 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// start of a function definition in GCC-extended K&R C.
!isDeclarationAfterDeclarator()) {
- if (AllowFunctionDefinitions) {
+ // Function definitions are only allowed at file scope and in C++ classes.
+ // The C++ inline method definition case is handled elsewhere, so we only
+ // need to handle the file scope definition case.
+ if (Context == Declarator::FileContext) {
if (isStartOfFunctionDefinition(D)) {
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
Diag(Tok, diag::err_function_declared_typedef);
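
The check added above catches C11 _Noreturn written after the declarator and, when this is the only declarator, offers a fix-it that moves the specifier to the front. A small C11 illustration of the rejected spelling and the accepted one:

/* Rejected with a fix-it, since _Noreturn is a function specifier:
       void fatal(const char *msg) _Noreturn;
   Accepted form after applying the suggested fix-it: */
_Noreturn void fatal(const char *msg);
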
@@ -1674,6 +1775,10 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// short x, __attribute__((common)) var; -> declarator
MaybeParseGNUAttributes(D);
+ // MSVC parses but ignores qualifiers after the comma as an extension.
+ if (getLangOpts().MicrosoftExt)
+ DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
+
ParseDeclarator(D);
if (!D.isInvalidType()) {
Decl *ThisDecl = ParseDeclarationAfterDeclarator(D);
@@ -2449,8 +2554,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs) {
if (DS.getSourceRange().isInvalid()) {
+ // Start the range at the current token but make the end of the range
+ // invalid. This will make the entire range invalid unless we successfully
+ // consume a token.
DS.SetRangeStart(Tok.getLocation());
- DS.SetRangeEnd(Tok.getLocation());
+ DS.SetRangeEnd(SourceLocation());
}
bool EnteringContext = (DSContext == DSC_class || DSContext == DSC_top_level);
@@ -2459,6 +2567,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
while (1) {
bool isInvalid = false;
+ bool isStorageClass = false;
const char *PrevSpec = nullptr;
unsigned DiagID = 0;
@@ -2728,6 +2837,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
goto DoneWithDeclSpec;
// typedef-name
+ case tok::kw___super:
case tok::kw_decltype:
case tok::identifier: {
// This identifier can only be a typedef name if we haven't already seen
@@ -2862,6 +2972,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___vectorcall:
case tok::kw___unaligned:
ParseMicrosoftTypeAttributes(DS.getAttributes());
continue;
@@ -2880,22 +2991,26 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw_typedef:
isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_typedef, Loc,
PrevSpec, DiagID, Policy);
+ isStorageClass = true;
break;
case tok::kw_extern:
if (DS.getThreadStorageClassSpec() == DeclSpec::TSCS___thread)
Diag(Tok, diag::ext_thread_before) << "extern";
isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_extern, Loc,
PrevSpec, DiagID, Policy);
+ isStorageClass = true;
break;
case tok::kw___private_extern__:
isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_private_extern,
Loc, PrevSpec, DiagID, Policy);
+ isStorageClass = true;
break;
case tok::kw_static:
if (DS.getThreadStorageClassSpec() == DeclSpec::TSCS___thread)
Diag(Tok, diag::ext_thread_before) << "static";
isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_static, Loc,
PrevSpec, DiagID, Policy);
+ isStorageClass = true;
break;
case tok::kw_auto:
if (getLangOpts().CPlusPlus11) {
@@ -2911,18 +3026,22 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
} else
isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_auto, Loc,
PrevSpec, DiagID, Policy);
+ isStorageClass = true;
break;
case tok::kw_register:
isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_register, Loc,
PrevSpec, DiagID, Policy);
+ isStorageClass = true;
break;
case tok::kw_mutable:
isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_mutable, Loc,
PrevSpec, DiagID, Policy);
+ isStorageClass = true;
break;
case tok::kw___thread:
isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS___thread, Loc,
PrevSpec, DiagID);
+ isStorageClass = true;
break;
case tok::kw_thread_local:
isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS_thread_local, Loc,
@@ -2931,6 +3050,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw__Thread_local:
isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS__Thread_local,
Loc, PrevSpec, DiagID);
+ isStorageClass = true;
break;
// function-specifier
@@ -3083,6 +3203,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw___pixel:
isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID, Policy);
break;
+ case tok::kw___bool:
+ isInvalid = DS.SetTypeAltiVecBool(true, Loc, PrevSpec, DiagID, Policy);
+ break;
case tok::kw___unknown_anytype:
isInvalid = DS.SetTypeSpecType(TST_unknown_anytype, Loc,
PrevSpec, DiagID, Policy);
@@ -3169,6 +3292,15 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
break;
// OpenCL qualifiers:
+ case tok::kw___generic:
+ // generic address space is introduced only in OpenCL v2.0
+ // see OpenCL C Spec v2.0 s6.5.5
+ if (Actions.getLangOpts().OpenCLVersion < 200) {
+ DiagID = diag::err_opencl_unknown_type_specifier;
+ PrevSpec = Tok.getIdentifierInfo()->getNameStart();
+ isInvalid = true;
+ break;
+ };
case tok::kw___private:
case tok::kw___global:
case tok::kw___local:
@@ -3203,6 +3335,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (DiagID == diag::ext_duplicate_declspec)
Diag(Tok, DiagID)
<< PrevSpec << FixItHint::CreateRemoval(Tok.getLocation());
+ else if (DiagID == diag::err_opencl_unknown_type_specifier)
+ Diag(Tok, DiagID) << PrevSpec << isStorageClass;
else
Diag(Tok, DiagID) << PrevSpec;
}
@@ -3232,14 +3366,15 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
/// declarator[opt] ':' constant-expression
/// [GNU] declarator[opt] ':' constant-expression attributes[opt]
///
-void Parser::
-ParseStructDeclaration(ParsingDeclSpec &DS, FieldCallback &Fields) {
+void Parser::ParseStructDeclaration(
+ ParsingDeclSpec &DS,
+ llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback) {
if (Tok.is(tok::kw___extension__)) {
// __extension__ silences extension warnings in the subexpression.
ExtensionRAIIObject O(Diags); // Use RAII to do this.
ConsumeToken();
- return ParseStructDeclaration(DS, Fields);
+ return ParseStructDeclaration(DS, FieldsCallback);
}
// Parse the common specifier-qualifiers-list piece.
@@ -3271,7 +3406,8 @@ ParseStructDeclaration(ParsingDeclSpec &DS, FieldCallback &Fields) {
// Don't parse FOO:BAR as if it were a typo for FOO::BAR.
ColonProtectionRAIIObject X(*this);
ParseDeclarator(DeclaratorInfo.D);
- }
+ } else
+ DeclaratorInfo.D.SetIdentifier(nullptr, Tok.getLocation());
if (TryConsumeToken(tok::colon)) {
ExprResult Res(ParseConstantExpression());
@@ -3285,7 +3421,7 @@ ParseStructDeclaration(ParsingDeclSpec &DS, FieldCallback &Fields) {
MaybeParseGNUAttributes(DeclaratorInfo.D);
// We're done with this declarator; invoke the callback.
- Fields.invoke(DeclaratorInfo);
+ FieldsCallback(DeclaratorInfo);
// If we don't have a comma, it is either the end of the list (a ';')
// or an error, bail out.
@@ -3349,28 +3485,19 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
}
if (!Tok.is(tok::at)) {
- struct CFieldCallback : FieldCallback {
- Parser &P;
- Decl *TagDecl;
- SmallVectorImpl<Decl *> &FieldDecls;
-
- CFieldCallback(Parser &P, Decl *TagDecl,
- SmallVectorImpl<Decl *> &FieldDecls) :
- P(P), TagDecl(TagDecl), FieldDecls(FieldDecls) {}
-
- void invoke(ParsingFieldDeclarator &FD) override {
- // Install the declarator into the current TagDecl.
- Decl *Field = P.Actions.ActOnField(P.getCurScope(), TagDecl,
- FD.D.getDeclSpec().getSourceRange().getBegin(),
- FD.D, FD.BitfieldSize);
- FieldDecls.push_back(Field);
- FD.complete(Field);
- }
- } Callback(*this, TagDecl, FieldDecls);
+ auto CFieldCallback = [&](ParsingFieldDeclarator &FD) {
+ // Install the declarator into the current TagDecl.
+ Decl *Field =
+ Actions.ActOnField(getCurScope(), TagDecl,
+ FD.D.getDeclSpec().getSourceRange().getBegin(),
+ FD.D, FD.BitfieldSize);
+ FieldDecls.push_back(Field);
+ FD.complete(Field);
+ };
// Parse all the comma separated declarators.
ParsingDeclSpec DS(*this);
- ParseStructDeclaration(DS, Callback);
+ ParseStructDeclaration(DS, CFieldCallback);
} else { // Handle @defs
ConsumeToken();
if (!Tok.isObjCAtKeyword(tok::objc_defs)) {
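
Replacing the hand-written FieldCallback class with a lambda passed as llvm::function_ref is a general refactor: when the callee only invokes the callback during the call, a non-owning callable reference is enough. A standard-library-only sketch of the same shape (FieldInfo and parseStructFields are invented; std::function stands in for llvm::function_ref, which has the same call-only contract without the allocation):

#include <functional>
#include <string>
#include <vector>

struct FieldInfo { std::string Name; };

// The parsing side only needs to *call* the callback for each declarator it
// finishes, so it takes a callable reference rather than an interface class.
void parseStructFields(const std::vector<std::string> &Names,
                       const std::function<void(FieldInfo &)> &OnField) {
  for (const std::string &N : Names) {
    FieldInfo FD{N};
    OnField(FD);
  }
}

void example() {
  std::vector<FieldInfo> Fields;
  parseStructFields({"x", "y"}, [&](FieldInfo &FD) { Fields.push_back(FD); });
}
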
@@ -3778,8 +3905,8 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
/// enumerator
/// enumerator-list ',' enumerator
/// enumerator:
-/// enumeration-constant
-/// enumeration-constant '=' constant-expression
+/// enumeration-constant attributes[opt]
+/// enumeration-constant attributes[opt] '=' constant-expression
/// enumeration-constant:
/// identifier
///
@@ -3816,8 +3943,13 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
// If attributes exist after the enumerator, parse them.
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseGNUAttributes(attrs);
- MaybeParseCXX11Attributes(attrs);
- ProhibitAttributes(attrs);
+ ProhibitAttributes(attrs); // GNU-style attributes are prohibited.
+ if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
+ if (!getLangOpts().CPlusPlus1z)
+ Diag(Tok.getLocation(), diag::warn_cxx14_compat_attribute)
+ << 1 /*enumerator*/;
+ ParseCXX11Attributes(attrs);
+ }
SourceLocation EqualLoc;
ExprResult AssignedVal;
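
The enumerator change parses a C++11 attribute-specifier after an enumerator name, a C++17 feature that earlier modes get with a compatibility warning, while GNU-style attributes there stay prohibited. A short C++17 example of what now parses:

// Valid in C++17 (and accepted with a compatibility warning before that):
enum class Status {
  ok = 0,
  legacy [[deprecated("use ok instead")]] = 1,
};
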
@@ -3921,6 +4053,7 @@ bool Parser::isTypeQualifier() const {
case tok::kw___local:
case tok::kw___global:
case tok::kw___constant:
+ case tok::kw___generic:
case tok::kw___read_only:
case tok::kw___read_write:
case tok::kw___write_only:
@@ -4059,6 +4192,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___vectorcall:
case tok::kw___w64:
case tok::kw___ptr64:
case tok::kw___ptr32:
@@ -4069,6 +4203,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw___local:
case tok::kw___global:
case tok::kw___constant:
+ case tok::kw___generic:
case tok::kw___read_only:
case tok::kw___read_write:
case tok::kw___write_only:
@@ -4227,6 +4362,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___vectorcall:
case tok::kw___w64:
case tok::kw___sptr:
case tok::kw___uptr:
@@ -4240,6 +4376,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw___local:
case tok::kw___global:
case tok::kw___constant:
+ case tok::kw___generic:
case tok::kw___read_only:
case tok::kw___read_write:
case tok::kw___write_only:
@@ -4373,20 +4510,18 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified) {
/// type-qualifier-list: [C99 6.7.5]
/// type-qualifier
/// [vendor] attributes
-/// [ only if VendorAttributesAllowed=true ]
+/// [ only if AttrReqs & AR_VendorAttributesParsed ]
/// type-qualifier-list type-qualifier
/// [vendor] type-qualifier-list attributes
-/// [ only if VendorAttributesAllowed=true ]
+/// [ only if AttrReqs & AR_VendorAttributesParsed ]
/// [C++0x] attribute-specifier[opt] is allowed before cv-qualifier-seq
-/// [ only if CXX11AttributesAllowed=true ]
-/// Note: vendor can be GNU, MS, etc.
-///
-void Parser::ParseTypeQualifierListOpt(DeclSpec &DS,
- bool VendorAttributesAllowed,
- bool CXX11AttributesAllowed,
+/// [ only if AttrReqs & AR_CXX11AttributesParsed ]
+/// Note: vendor can be GNU, MS, etc. and can be explicitly controlled via
+/// AttrRequirements bitmask values.
+void Parser::ParseTypeQualifierListOpt(DeclSpec &DS, unsigned AttrReqs,
bool AtomicAllowed,
bool IdentifierRequired) {
- if (getLangOpts().CPlusPlus11 && CXX11AttributesAllowed &&
+ if (getLangOpts().CPlusPlus11 && (AttrReqs & AR_CXX11AttributesParsed) &&
isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
ParseCXX11Attributes(attrs);
@@ -4430,6 +4565,7 @@ void Parser::ParseTypeQualifierListOpt(DeclSpec &DS,
case tok::kw___global:
case tok::kw___local:
case tok::kw___constant:
+ case tok::kw___generic:
case tok::kw___read_only:
case tok::kw___write_only:
case tok::kw___read_write:
@@ -4439,7 +4575,7 @@ void Parser::ParseTypeQualifierListOpt(DeclSpec &DS,
case tok::kw___uptr:
// GNU libc headers in C mode use '__uptr' as an identifier which conflicts
// with the MS modifier keyword.
- if (VendorAttributesAllowed && !getLangOpts().CPlusPlus &&
+ if ((AttrReqs & AR_DeclspecAttributesParsed) && !getLangOpts().CPlusPlus &&
IdentifierRequired && DS.isEmpty() && NextToken().is(tok::semi)) {
if (TryKeywordIdentFallback(false))
continue;
@@ -4452,20 +4588,28 @@ void Parser::ParseTypeQualifierListOpt(DeclSpec &DS,
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___vectorcall:
case tok::kw___unaligned:
- if (VendorAttributesAllowed) {
+ if (AttrReqs & AR_DeclspecAttributesParsed) {
ParseMicrosoftTypeAttributes(DS.getAttributes());
continue;
}
goto DoneWithTypeQuals;
case tok::kw___pascal:
- if (VendorAttributesAllowed) {
+ if (AttrReqs & AR_VendorAttributesParsed) {
ParseBorlandTypeAttributes(DS.getAttributes());
continue;
}
goto DoneWithTypeQuals;
case tok::kw___attribute:
- if (VendorAttributesAllowed) {
+ if (AttrReqs & AR_GNUAttributesParsedAndRejected)
+ // When GNU attributes are expressly forbidden, diagnose their usage.
+ Diag(Tok, diag::err_attributes_not_allowed);
+
+ // Parse the attributes even if they are rejected to ensure that error
+ // recovery is graceful.
+ if (AttrReqs & AR_GNUAttributesParsed ||
+ AttrReqs & AR_GNUAttributesParsedAndRejected) {
ParseGNUAttributes(DS.getAttributes());
continue; // do *not* consume the next token!
}
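
ParseTypeQualifierListOpt now takes a bitmask of attribute requirements instead of two booleans, so one caller can ask for "parse GNU attributes but diagnose them", as the new-type-id case does. A generic sketch of that flag-mask idiom, loosely modelled on the AR_* constants but with invented names:

#include <cstdio>

// Requirements are independent bits so call sites can combine them freely.
enum AttrRequirements : unsigned {
  AR_None                 = 0,
  AR_GNUParsed            = 1u << 0,
  AR_GNUParsedAndRejected = 1u << 1,  // parse for recovery, but diagnose
  AR_CXX11Parsed          = 1u << 2,
  AR_DeclspecParsed       = 1u << 3,
};

void parseQualifiers(unsigned Reqs) {
  if (Reqs & AR_GNUParsedAndRejected)
    std::puts("error: attributes are not allowed here");
  if (Reqs & (AR_GNUParsed | AR_GNUParsedAndRejected))
    std::puts("...parsing GNU attributes anyway for graceful recovery");
}

// A caller forbidding GNU attributes but still consuming them:
void example() { parseQualifiers(AR_CXX11Parsed | AR_GNUParsedAndRejected); }
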
@@ -4498,15 +4642,27 @@ void Parser::ParseDeclarator(Declarator &D) {
ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
}
-static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang) {
+static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
+ unsigned TheContext) {
if (Kind == tok::star || Kind == tok::caret)
return true;
- // We parse rvalue refs in C++03, because otherwise the errors are scary.
if (!Lang.CPlusPlus)
return false;
- return Kind == tok::amp || Kind == tok::ampamp;
+ if (Kind == tok::amp)
+ return true;
+
+ // We parse rvalue refs in C++03, because otherwise the errors are scary.
+ // But we must not parse them in conversion-type-ids and new-type-ids, since
+ // those can be legitimately followed by a && operator.
+ // (The same thing can in theory happen after a trailing-return-type, but
+ // since those are a C++11 feature, there is no rejects-valid issue there.)
+ if (Kind == tok::ampamp)
+ return Lang.CPlusPlus11 || (TheContext != Declarator::ConversionIdContext &&
+ TheContext != Declarator::CXXNewContext);
+
+ return false;
}
/// ParseDeclaratorInternal - Parse a C or C++ declarator. The direct-declarator
@@ -4577,7 +4733,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Sema will have to catch (syntactically invalid) pointers into global
// scope. It has to catch pointers into namespace scope anyway.
D.AddTypeInfo(DeclaratorChunk::getMemberPointer(SS,DS.getTypeQualifiers(),
- Loc),
+ DS.getLocEnd()),
DS.getAttributes(),
/* Don't replace range end. */SourceLocation());
return;
@@ -4586,7 +4742,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
tok::TokenKind Kind = Tok.getKind();
// Not a pointer, C++ reference, or block.
- if (!isPtrOperatorToken(Kind, getLangOpts())) {
+ if (!isPtrOperatorToken(Kind, getLangOpts(), D.getContext())) {
if (DirectDeclParser)
(this->*DirectDeclParser)(D);
return;
@@ -4601,8 +4757,13 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Is a pointer.
DeclSpec DS(AttrFactory);
- // FIXME: GNU attributes are not allowed here in a new-type-id.
- ParseTypeQualifierListOpt(DS, true, true, true, !D.mayOmitIdentifier());
+ // GNU attributes are not allowed here in a new-type-id, but Declspec and
+ // C++11 attributes are allowed.
+ unsigned Reqs = AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed |
+ ((D.getContext() != Declarator::CXXNewContext)
+ ? AR_GNUAttributesParsed
+ : AR_GNUAttributesParsedAndRejected);
+ ParseTypeQualifierListOpt(DS, Reqs, true, !D.mayOmitIdentifier());
D.ExtendWithDeclSpec(DS);
// Recursively parse the declarator.
@@ -4762,21 +4923,22 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
}
// C++0x [dcl.fct]p14:
- // There is a syntactic ambiguity when an ellipsis occurs at the end
- // of a parameter-declaration-clause without a preceding comma. In
- // this case, the ellipsis is parsed as part of the
- // abstract-declarator if the type of the parameter names a template
- // parameter pack that has not been expanded; otherwise, it is parsed
- // as part of the parameter-declaration-clause.
+ // There is a syntactic ambiguity when an ellipsis occurs at the end of a
+ // parameter-declaration-clause without a preceding comma. In this case,
+ // the ellipsis is parsed as part of the abstract-declarator if the type
+ // of the parameter either names a template parameter pack that has not
+ // been expanded or contains auto; otherwise, it is parsed as part of the
+ // parameter-declaration-clause.
if (Tok.is(tok::ellipsis) && D.getCXXScopeSpec().isEmpty() &&
!((D.getContext() == Declarator::PrototypeContext ||
D.getContext() == Declarator::LambdaExprParameterContext ||
D.getContext() == Declarator::BlockLiteralContext) &&
NextToken().is(tok::r_paren) &&
!D.hasGroupingParens() &&
- !Actions.containsUnexpandedParameterPacks(D))) {
+ !Actions.containsUnexpandedParameterPacks(D) &&
+ D.getDeclSpec().getTypeSpecType() != TST_auto)) {
SourceLocation EllipsisLoc = ConsumeToken();
- if (isPtrOperatorToken(Tok.getKind(), getLangOpts())) {
+ if (isPtrOperatorToken(Tok.getKind(), getLangOpts(), D.getContext())) {
// The ellipsis was put in the wrong place. Recover, and explain to
// the user what they should have done.
ParseDeclarator(D);
@@ -5102,11 +5264,13 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
SourceLocation RefQualifierLoc;
SourceLocation ConstQualifierLoc;
SourceLocation VolatileQualifierLoc;
+ SourceLocation RestrictQualifierLoc;
ExceptionSpecificationType ESpecType = EST_None;
SourceRange ESpecRange;
SmallVector<ParsedType, 2> DynamicExceptions;
SmallVector<SourceRange, 2> DynamicExceptionRanges;
ExprResult NoexceptExpr;
+ CachedTokens *ExceptionSpecTokens = 0;
ParsedAttributes FnAttrs(AttrFactory);
TypeResult TrailingReturnType;
@@ -5149,13 +5313,13 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
// with the virt-specifier-seq and pure-specifier in the same way.
// Parse cv-qualifier-seq[opt].
- ParseTypeQualifierListOpt(DS, /*VendorAttributesAllowed*/ false,
- /*CXX11AttributesAllowed*/ false,
+ ParseTypeQualifierListOpt(DS, AR_NoAttributesParsed,
/*AtomicAllowed*/ false);
if (!DS.getSourceRange().getEnd().isInvalid()) {
EndLoc = DS.getSourceRange().getEnd();
ConstQualifierLoc = DS.getConstSpecLoc();
VolatileQualifierLoc = DS.getVolatileSpecLoc();
+ RestrictQualifierLoc = DS.getRestrictSpecLoc();
}
// Parse ref-qualifier[opt].
@@ -5188,15 +5352,36 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
dyn_cast<CXXRecordDecl>(Actions.CurContext),
DS.getTypeQualifiers() |
(D.getDeclSpec().isConstexprSpecified() &&
- !getLangOpts().CPlusPlus1y
+ !getLangOpts().CPlusPlus14
? Qualifiers::Const : 0),
IsCXX11MemberFunction);
// Parse exception-specification[opt].
- ESpecType = tryParseExceptionSpecification(ESpecRange,
+ bool Delayed = D.isFirstDeclarationOfMember() &&
+ D.isFunctionDeclaratorAFunctionDeclaration();
+ if (Delayed && Actions.isLibstdcxxEagerExceptionSpecHack(D) &&
+ GetLookAheadToken(0).is(tok::kw_noexcept) &&
+ GetLookAheadToken(1).is(tok::l_paren) &&
+ GetLookAheadToken(2).is(tok::kw_noexcept) &&
+ GetLookAheadToken(3).is(tok::l_paren) &&
+ GetLookAheadToken(4).is(tok::identifier) &&
+ GetLookAheadToken(4).getIdentifierInfo()->isStr("swap")) {
+ // HACK: We've got an exception-specification
+ // noexcept(noexcept(swap(...)))
+ // or
+ // noexcept(noexcept(swap(...)) && noexcept(swap(...)))
+ // on a 'swap' member function. This is a libstdc++ bug; the lookup
+ // for 'swap' will only find the function we're currently declaring,
+ // whereas it expects to find a non-member swap through ADL. Turn off
+ // delayed parsing to give it a chance to find what it expects.
+ Delayed = false;
+ }
+ ESpecType = tryParseExceptionSpecification(Delayed,
+ ESpecRange,
DynamicExceptions,
DynamicExceptionRanges,
- NoexceptExpr);
+ NoexceptExpr,
+ ExceptionSpecTokens);
if (ESpecType != EST_None)
EndLoc = ESpecRange.getEnd();
@@ -5228,6 +5413,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
RefQualifierIsLValueRef,
RefQualifierLoc, ConstQualifierLoc,
VolatileQualifierLoc,
+ RestrictQualifierLoc,
/*MutableLoc=*/SourceLocation(),
ESpecType, ESpecRange.getBegin(),
DynamicExceptions.data(),
@@ -5235,6 +5421,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
DynamicExceptions.size(),
NoexceptExpr.isUsable() ?
NoexceptExpr.get() : nullptr,
+ ExceptionSpecTokens,
StartLoc, LocalEndLoc, D,
TrailingReturnType),
FnAttrs, EndLoc);
@@ -5306,7 +5493,7 @@ void Parser::ParseFunctionDeclaratorIdentifierList(
Diag(Tok, diag::err_unexpected_typedef_ident) << ParmII;
// Verify that the argument identifier has not already been mentioned.
- if (!ParamsSoFar.insert(ParmII)) {
+ if (!ParamsSoFar.insert(ParmII).second) {
Diag(Tok, diag::err_param_redefinition) << ParmII;
} else {
// Remember this identifier in ParamInfo.
@@ -5414,10 +5601,18 @@ void Parser::ParseParameterDeclarationClause(
// Otherwise, we have something. Add it and let semantic analysis try
// to grok it and add the result to the ParamInfo we are building.
+ // Last chance to recover from a misplaced ellipsis in an attempted
+ // parameter pack declaration.
+ if (Tok.is(tok::ellipsis) &&
+ (NextToken().isNot(tok::r_paren) ||
+ (!ParmDeclarator.getEllipsisLoc().isValid() &&
+ !Actions.isUnexpandedParameterPackPermitted())) &&
+ Actions.containsUnexpandedParameterPacks(ParmDeclarator))
+ DiagnoseMisplacedEllipsisInDeclarator(ConsumeToken(), ParmDeclarator);
+
// Inform the actions module about the parameter declarator, so it gets
// added to the current scope.
- Decl *Param = Actions.ActOnParamDeclarator(getCurScope(),
- ParmDeclarator);
+ Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDeclarator);
// Parse the default argument, if any. We parse the default
// arguments in all dialects; the semantic analysis in
// ActOnParamDefaultArgument will reject the default argument in
@@ -5433,20 +5628,14 @@ void Parser::ParseParameterDeclarationClause(
// FIXME: Can we use a smart pointer for Toks?
DefArgToks = new CachedTokens;
+ SourceLocation ArgStartLoc = NextToken().getLocation();
if (!ConsumeAndStoreInitializer(*DefArgToks, CIK_DefaultArgument)) {
delete DefArgToks;
DefArgToks = nullptr;
Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
} else {
- // Mark the end of the default argument so that we know when to
- // stop when we parse it later on.
- Token DefArgEnd;
- DefArgEnd.startToken();
- DefArgEnd.setKind(tok::cxx_defaultarg_end);
- DefArgEnd.setLocation(Tok.getLocation());
- DefArgToks->push_back(DefArgEnd);
Actions.ActOnParamUnparsedDefaultArgument(Param, EqualLoc,
- (*DefArgToks)[1].getLocation());
+ ArgStartLoc);
}
} else {
// Consume the '='.
@@ -5464,6 +5653,7 @@ void Parser::ParseParameterDeclarationClause(
DefArgResult = ParseBraceInitializer();
} else
DefArgResult = ParseAssignmentExpression();
+ DefArgResult = Actions.CorrectDelayedTyposInExpr(DefArgResult);
if (DefArgResult.isInvalid()) {
Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
SkipUntil(tok::comma, tok::r_paren, StopAtSemi | StopBeforeMatch);
@@ -5480,12 +5670,34 @@ void Parser::ParseParameterDeclarationClause(
Param, DefArgToks));
}
- if (TryConsumeToken(tok::ellipsis, EllipsisLoc) &&
- !getLangOpts().CPlusPlus) {
- // We have ellipsis without a preceding ',', which is ill-formed
- // in C. Complain and provide the fix.
- Diag(EllipsisLoc, diag::err_missing_comma_before_ellipsis)
+ if (TryConsumeToken(tok::ellipsis, EllipsisLoc)) {
+ if (!getLangOpts().CPlusPlus) {
+ // We have ellipsis without a preceding ',', which is ill-formed
+ // in C. Complain and provide the fix.
+ Diag(EllipsisLoc, diag::err_missing_comma_before_ellipsis)
+ << FixItHint::CreateInsertion(EllipsisLoc, ", ");
+ } else if (ParmDeclarator.getEllipsisLoc().isValid() ||
+ Actions.containsUnexpandedParameterPacks(ParmDeclarator)) {
+ // It looks like this was supposed to be a parameter pack. Warn and
+ // point out where the ellipsis should have gone.
+ SourceLocation ParmEllipsis = ParmDeclarator.getEllipsisLoc();
+ Diag(EllipsisLoc, diag::warn_misplaced_ellipsis_vararg)
+ << ParmEllipsis.isValid() << ParmEllipsis;
+ if (ParmEllipsis.isValid()) {
+ Diag(ParmEllipsis,
+ diag::note_misplaced_ellipsis_vararg_existing_ellipsis);
+ } else {
+ Diag(ParmDeclarator.getIdentifierLoc(),
+ diag::note_misplaced_ellipsis_vararg_add_ellipsis)
+ << FixItHint::CreateInsertion(ParmDeclarator.getIdentifierLoc(),
+ "...")
+ << !ParmDeclarator.hasName();
+ }
+ Diag(EllipsisLoc, diag::note_misplaced_ellipsis_vararg_add_comma)
<< FixItHint::CreateInsertion(EllipsisLoc, ", ");
+ }
+
+ // We can't have any more parameters after an ellipsis.
break;
}
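
The misplaced-ellipsis diagnostics fire when a '...' that would start C-style varargs follows a parameter whose type still contains an unexpanded pack; the user almost certainly meant to declare a parameter pack. A hypothetical template showing the warned-about spelling and the intended one:

// Warned about: the trailing '...' makes this a C-style variadic function,
// while T remains an unexpanded parameter pack in the parameter's type.
//   template <typename ...T> void f(T t ...);
//
// Intended spelling, with the ellipsis declaring a parameter pack:
template <typename ...T> void f(T ...t);
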
@@ -5546,7 +5758,7 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
// If there is a type-qualifier-list, read it now.
// Type qualifiers in an array subscript are a C99 feature.
DeclSpec DS(AttrFactory);
- ParseTypeQualifierListOpt(DS, false /*no attributes*/);
+ ParseTypeQualifierListOpt(DS, AR_CXX11AttributesParsed);
// If we haven't already read 'static', check to see if there is one after the
// type-qualifier-list.
@@ -5582,7 +5794,13 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
} else {
EnterExpressionEvaluationContext Unevaluated(Actions,
Sema::ConstantEvaluated);
- NumElements = ParseAssignmentExpression();
+ NumElements =
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ }
+ } else {
+ if (StaticLoc.isValid()) {
+ Diag(StaticLoc, diag::err_unspecified_size_with_static);
+ StaticLoc = SourceLocation(); // Drop the static.
}
}
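
The bracket-declarator change diagnoses C99 'static' in an array parameter that has no size expression, since static is a promise about a minimum element count. Small C99 declarations showing the accepted form and the now-rejected one:

/* Fine: the caller must pass at least 10 elements. */
void fill(int a[static 10]);

/* Now diagnosed, and the 'static' is dropped: there is no size to promise.
       void fill_bad(int a[static]);
*/
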
@@ -5716,8 +5934,8 @@ void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
bool isCastExpr;
ParsedType CastTy;
SourceRange CastRange;
- ExprResult Operand = ParseExprAfterUnaryExprOrTypeTrait(OpTok, isCastExpr,
- CastTy, CastRange);
+ ExprResult Operand = Actions.CorrectDelayedTyposInExpr(
+ ParseExprAfterUnaryExprOrTypeTrait(OpTok, isCastExpr, CastTy, CastRange));
if (hasParens)
DS.setTypeofParensRange(CastRange);
@@ -5817,6 +6035,7 @@ bool Parser::TryAltiVecVectorTokenOutOfLine() {
case tok::kw_float:
case tok::kw_double:
case tok::kw_bool:
+ case tok::kw___bool:
case tok::kw___pixel:
Tok.setKind(tok::kw___vector);
return true;
@@ -5850,6 +6069,7 @@ bool Parser::TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
case tok::kw_float:
case tok::kw_double:
case tok::kw_bool:
+ case tok::kw___bool:
case tok::kw___pixel:
isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID, Policy);
return true;
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
index 6200363b3bc5..87d99096440c 100644
--- a/lib/Parse/ParseDeclCXX.cpp
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -17,8 +17,8 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/CharInfo.h"
-#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
@@ -73,7 +73,15 @@ Decl *Parser::ParseNamespace(unsigned Context,
std::vector<IdentifierInfo*> ExtraIdent;
std::vector<SourceLocation> ExtraNamespaceLoc;
- Token attrTok;
+ ParsedAttributesWithRange attrs(AttrFactory);
+ SourceLocation attrLoc;
+ if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
+ if (!getLangOpts().CPlusPlus1z)
+ Diag(Tok.getLocation(), diag::warn_cxx14_compat_attribute)
+ << 0 /*namespace*/;
+ attrLoc = Tok.getLocation();
+ ParseCXX11Attributes(attrs);
+ }
if (Tok.is(tok::identifier)) {
Ident = Tok.getIdentifierInfo();
@@ -85,10 +93,13 @@ Decl *Parser::ParseNamespace(unsigned Context,
}
}
+ // A nested namespace definition cannot have attributes.
+ if (!ExtraNamespaceLoc.empty() && attrLoc.isValid())
+ Diag(attrLoc, diag::err_unexpected_nested_namespace_attribute);
+
// Read label attributes, if present.
- ParsedAttributes attrs(AttrFactory);
if (Tok.is(tok::kw___attribute)) {
- attrTok = Tok;
+ attrLoc = Tok.getLocation();
ParseGNUAttributes(attrs);
}
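For reference, a minimal sketch (not from the patch itself) of the namespace forms these ParseNamespace changes now distinguish; "C++1z" is the working name this code uses for what became C++17:

    namespace A::B {                  // nested-namespace-definition: an extension
      void f();                       // before C++1z, compat-warned otherwise
    }
    inline namespace C::D { }         // rejected: err_inline_nested_namespace_definition
    namespace [[deprecated]] N { }    // attribute on a namespace is now parsed here
    namespace [[deprecated]] X::Y { } // but rejected on a nested-namespace-definition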
@@ -99,8 +110,8 @@ Decl *Parser::ParseNamespace(unsigned Context,
SkipUntil(tok::semi);
return nullptr;
}
- if (!attrs.empty())
- Diag(attrTok, diag::err_unexpected_namespace_attributes_alias);
+ if (attrLoc.isValid())
+ Diag(attrLoc, diag::err_unexpected_namespace_attributes_alias);
if (InlineLoc.isValid())
Diag(InlineLoc, diag::err_inline_namespace_alias)
<< FixItHint::CreateRemoval(InlineLoc);
@@ -110,39 +121,36 @@ Decl *Parser::ParseNamespace(unsigned Context,
BalancedDelimiterTracker T(*this, tok::l_brace);
if (T.consumeOpen()) {
- if (!ExtraIdent.empty()) {
- Diag(ExtraNamespaceLoc[0], diag::err_nested_namespaces_with_double_colon)
- << SourceRange(ExtraNamespaceLoc.front(), ExtraIdentLoc.back());
- }
-
if (Ident)
Diag(Tok, diag::err_expected) << tok::l_brace;
else
Diag(Tok, diag::err_expected_either) << tok::identifier << tok::l_brace;
-
return nullptr;
}
if (getCurScope()->isClassScope() || getCurScope()->isTemplateParamScope() ||
getCurScope()->isInObjcMethodScope() || getCurScope()->getBlockParent() ||
getCurScope()->getFnParent()) {
- if (!ExtraIdent.empty()) {
- Diag(ExtraNamespaceLoc[0], diag::err_nested_namespaces_with_double_colon)
- << SourceRange(ExtraNamespaceLoc.front(), ExtraIdentLoc.back());
- }
Diag(T.getOpenLocation(), diag::err_namespace_nonnamespace_scope);
SkipUntil(tok::r_brace);
return nullptr;
}
- if (!ExtraIdent.empty()) {
+ if (ExtraIdent.empty()) {
+ // Normal namespace definition, not a nested-namespace-definition.
+ } else if (InlineLoc.isValid()) {
+ Diag(InlineLoc, diag::err_inline_nested_namespace_definition);
+ } else if (getLangOpts().CPlusPlus1z) {
+ Diag(ExtraNamespaceLoc[0],
+ diag::warn_cxx14_compat_nested_namespace_definition);
+ } else {
TentativeParsingAction TPA(*this);
SkipUntil(tok::r_brace, StopBeforeMatch);
Token rBraceToken = Tok;
TPA.Revert();
if (!rBraceToken.is(tok::r_brace)) {
- Diag(ExtraNamespaceLoc[0], diag::err_nested_namespaces_with_double_colon)
+ Diag(ExtraNamespaceLoc[0], diag::ext_nested_namespace_definition)
<< SourceRange(ExtraNamespaceLoc.front(), ExtraIdentLoc.back());
} else {
std::string NamespaceFix;
@@ -156,7 +164,7 @@ Decl *Parser::ParseNamespace(unsigned Context,
for (unsigned i = 0, e = ExtraIdent.size(); i != e; ++i)
RBraces += "} ";
- Diag(ExtraNamespaceLoc[0], diag::err_nested_namespaces_with_double_colon)
+ Diag(ExtraNamespaceLoc[0], diag::ext_nested_namespace_definition)
<< FixItHint::CreateReplacement(SourceRange(ExtraNamespaceLoc.front(),
ExtraIdentLoc.back()),
NamespaceFix)
@@ -195,11 +203,11 @@ Decl *Parser::ParseNamespace(unsigned Context,
}
/// ParseInnerNamespace - Parse the contents of a namespace.
-void Parser::ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc,
- std::vector<IdentifierInfo*>& Ident,
- std::vector<SourceLocation>& NamespaceLoc,
- unsigned int index, SourceLocation& InlineLoc,
- ParsedAttributes& attrs,
+void Parser::ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
+ std::vector<IdentifierInfo *> &Ident,
+ std::vector<SourceLocation> &NamespaceLoc,
+ unsigned int index, SourceLocation &InlineLoc,
+ ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker) {
if (index == Ident.size()) {
while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
@@ -216,7 +224,9 @@ void Parser::ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc,
return;
}
- // Parse improperly nested namespaces.
+ // Handle a nested namespace definition.
+ // FIXME: Preserve the source information through to the AST rather than
+ // desugaring it here.
ParseScope NamespaceScope(this, Scope::DeclScope);
Decl *NamespcDecl =
Actions.ActOnStartNamespaceDef(getCurScope(), SourceLocation(),
@@ -493,6 +503,12 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
if (TryConsumeToken(tok::kw_typename, TypenameLoc))
HasTypenameKeyword = true;
+ if (Tok.is(tok::kw___super)) {
+ Diag(Tok.getLocation(), diag::err_super_in_using_declaration);
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
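For reference, a sketch (assuming MS extensions are enabled, e.g. -fms-extensions) of the construct the new err_super_in_using_declaration check rejects, next to the supported use of '__super':

    struct Base { void f(); };
    struct Derived : Base {
      using __super::f;            // now rejected in a using-declaration
      void g() { __super::f(); }   // still fine as a scope specifier in a body
    };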
// Parse nested-name-specifier.
IdentifierInfo *LastII = nullptr;
ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false,
@@ -692,7 +708,7 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
ExprResult AssertMessage;
if (Tok.is(tok::r_paren)) {
Diag(Tok, getLangOpts().CPlusPlus1z
- ? diag::warn_cxx1y_compat_static_assert_no_message
+ ? diag::warn_cxx14_compat_static_assert_no_message
: diag::ext_static_assert_no_message)
<< (getLangOpts().CPlusPlus1z
? FixItHint()
@@ -769,7 +785,7 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
// because the typename-specifier in a function-style cast operation can't
// be 'auto'.
Diag(Tok.getLocation(),
- getLangOpts().CPlusPlus1y
+ getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_decltype_auto_type_specifier
: diag::ext_decltype_auto_type_specifier);
ConsumeToken();
@@ -780,7 +796,7 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
// The operand of the decltype specifier is an unevaluated operand.
EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated,
nullptr,/*IsDecltype=*/true);
- Result = ParseExpression();
+ Result = Actions.CorrectDelayedTyposInExpr(ParseExpression());
if (Result.isInvalid()) {
DS.SetTypeSpecError();
if (SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch)) {
@@ -903,8 +919,8 @@ void Parser::ParseUnderlyingTypeSpecifier(DeclSpec &DS) {
/// In C++98, instead of base-type-specifier, we have:
///
/// ::[opt] nested-name-specifier[opt] class-name
-Parser::TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
- SourceLocation &EndLocation) {
+TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
+ SourceLocation &EndLocation) {
// Ignore attempts to use typename
if (Tok.is(tok::kw_typename)) {
Diag(Tok, diag::err_expected_class_name_not_template)
@@ -1066,6 +1082,11 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
case tok::comma: // __builtin_offsetof(struct foo{...} ,
case tok::kw_operator: // struct foo operator ++() {...}
case tok::kw___declspec: // struct foo {...} __declspec(...)
+ case tok::l_square: // void f(struct f [ 3])
+ case tok::ellipsis: // void f(struct f ... [Ns])
+ // FIXME: we should emit a semantic diagnostic when a declaration
+ // attribute appears in a type attribute position.

+ case tok::kw___attribute: // struct foo __attribute__((used)) x;
return true;
case tok::colon:
return CouldBeBitfield; // enum E { ... } : 2;
@@ -1073,6 +1094,8 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
case tok::kw_const: // struct foo {...} const x;
case tok::kw_volatile: // struct foo {...} volatile x;
case tok::kw_restrict: // struct foo {...} restrict x;
+ case tok::kw__Atomic: // struct foo {...} _Atomic x;
+ case tok::kw___unaligned: // struct foo {...} __unaligned *x;
// Function specifiers
// Note, no 'explicit'. An explicit function must be either a conversion
// operator or a constructor. Either way, it can't have a return type.
@@ -1111,10 +1134,6 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
if (!getLangOpts().CPlusPlus)
return true;
break;
- // C++11 attributes
- case tok::l_square: // enum E [[]] x
- // Note, no tok::kw_alignas here; alignas cannot appertain to a type.
- return getLangOpts().CPlusPlus11 && NextToken().is(tok::l_square);
case tok::greater:
// template<class T = class X>
return getLangOpts().CPlusPlus;
@@ -1224,17 +1243,65 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// C++11 attributes
SourceLocation AttrFixitLoc = Tok.getLocation();
- // GNU libstdc++ and libc++ use certain intrinsic names as the
- // name of struct templates, but some are keywords in GCC >= 4.3
- // MSVC and Clang. For compatibility, convert the token to an identifier
- // and issue a warning diagnostic.
- if (TagType == DeclSpec::TST_struct && !Tok.is(tok::identifier) &&
- !Tok.isAnnotation()) {
- const IdentifierInfo *II = Tok.getIdentifierInfo();
- // We rarely end up here so the following check is efficient.
- if (II && II->getName().startswith("__is_"))
- TryKeywordIdentFallback(true);
- }
+ if (TagType == DeclSpec::TST_struct &&
+ Tok.isNot(tok::identifier) &&
+ !Tok.isAnnotation() &&
+ Tok.getIdentifierInfo() &&
+ (Tok.is(tok::kw___is_abstract) ||
+ Tok.is(tok::kw___is_arithmetic) ||
+ Tok.is(tok::kw___is_array) ||
+ Tok.is(tok::kw___is_base_of) ||
+ Tok.is(tok::kw___is_class) ||
+ Tok.is(tok::kw___is_complete_type) ||
+ Tok.is(tok::kw___is_compound) ||
+ Tok.is(tok::kw___is_const) ||
+ Tok.is(tok::kw___is_constructible) ||
+ Tok.is(tok::kw___is_convertible) ||
+ Tok.is(tok::kw___is_convertible_to) ||
+ Tok.is(tok::kw___is_destructible) ||
+ Tok.is(tok::kw___is_empty) ||
+ Tok.is(tok::kw___is_enum) ||
+ Tok.is(tok::kw___is_floating_point) ||
+ Tok.is(tok::kw___is_final) ||
+ Tok.is(tok::kw___is_function) ||
+ Tok.is(tok::kw___is_fundamental) ||
+ Tok.is(tok::kw___is_integral) ||
+ Tok.is(tok::kw___is_interface_class) ||
+ Tok.is(tok::kw___is_literal) ||
+ Tok.is(tok::kw___is_lvalue_expr) ||
+ Tok.is(tok::kw___is_lvalue_reference) ||
+ Tok.is(tok::kw___is_member_function_pointer) ||
+ Tok.is(tok::kw___is_member_object_pointer) ||
+ Tok.is(tok::kw___is_member_pointer) ||
+ Tok.is(tok::kw___is_nothrow_assignable) ||
+ Tok.is(tok::kw___is_nothrow_constructible) ||
+ Tok.is(tok::kw___is_nothrow_destructible) ||
+ Tok.is(tok::kw___is_object) ||
+ Tok.is(tok::kw___is_pod) ||
+ Tok.is(tok::kw___is_pointer) ||
+ Tok.is(tok::kw___is_polymorphic) ||
+ Tok.is(tok::kw___is_reference) ||
+ Tok.is(tok::kw___is_rvalue_expr) ||
+ Tok.is(tok::kw___is_rvalue_reference) ||
+ Tok.is(tok::kw___is_same) ||
+ Tok.is(tok::kw___is_scalar) ||
+ Tok.is(tok::kw___is_sealed) ||
+ Tok.is(tok::kw___is_signed) ||
+ Tok.is(tok::kw___is_standard_layout) ||
+ Tok.is(tok::kw___is_trivial) ||
+ Tok.is(tok::kw___is_trivially_assignable) ||
+ Tok.is(tok::kw___is_trivially_constructible) ||
+ Tok.is(tok::kw___is_trivially_copyable) ||
+ Tok.is(tok::kw___is_union) ||
+ Tok.is(tok::kw___is_unsigned) ||
+ Tok.is(tok::kw___is_void) ||
+ Tok.is(tok::kw___is_volatile)))
+ // GNU libstdc++ 4.2 and libc++ use certain intrinsic names as the
+ // name of struct templates, but some are keywords in GCC >= 4.3
+ // and Clang. Therefore, when we see the token sequence "struct
+ // X", make X into a normal identifier rather than a keyword, to
+ // allow libstdc++ 4.2 and libc++ to work properly.
+ TryKeywordIdentFallback(true);
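For reference, roughly the libstdc++ 4.2 / libc++ compatibility pattern the restored keyword fallback serves; __is_pointer is one of the names listed above:

    // Older library headers declare templates whose names collide with builtin
    // type-trait keywords; after 'struct' the token is demoted to an identifier
    // so such headers keep parsing.
    template <typename T> struct __is_pointer      { enum { __value = 0 }; };
    template <typename T> struct __is_pointer<T *> { enum { __value = 1 }; };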
// Parse the (optional) nested-name-specifier.
CXXScopeSpec &SS = DS.getTypeSpecScope();
@@ -1669,7 +1736,11 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Also enforce C++ [temp]p3:
// In a template-declaration which defines a class, no declarator
// is permitted.
+ //
+ // After a type-specifier, we don't expect a semicolon. This only happens in
+ // C, since definitions are not permitted in this context in C++.
if (TUK == Sema::TUK_Definition &&
+ (getLangOpts().CPlusPlus || !isTypeSpecifier(DSC)) &&
(TemplateInfo.Kind || !isValidAfterTypeSpecifier(false))) {
if (Tok.isNot(tok::semi)) {
const PrintingPolicy &PPol = Actions.getASTContext().getPrintingPolicy();
@@ -1731,7 +1802,7 @@ void Parser::ParseBaseClause(Decl *ClassDecl) {
/// base-type-specifier
/// attribute-specifier-seq[opt] access-specifier 'virtual'[opt]
/// base-type-specifier
-Parser::BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
+BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
bool IsVirtual = false;
SourceLocation StartLoc = Tok.getLocation();
@@ -1806,16 +1877,34 @@ AccessSpecifier Parser::getAccessSpecifierIfPresent() const {
}
/// \brief If the given declarator has any parts for which parsing has to be
-/// delayed, e.g., default arguments, create a late-parsed method declaration
-/// record to handle the parsing at the end of the class definition.
+/// delayed, e.g., default arguments or an exception-specification, create a
+/// late-parsed method declaration record to handle the parsing at the end of
+/// the class definition.
void Parser::HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl) {
// We just declared a member function. If this member function
- // has any default arguments, we'll need to parse them later.
+ // has any default arguments or an exception-specification, we'll need to
+ // parse them later.
LateParsedMethodDeclaration *LateMethod = nullptr;
DeclaratorChunk::FunctionTypeInfo &FTI
= DeclaratorInfo.getFunctionTypeInfo();
+ // If there was a late-parsed exception-specification, hold onto its tokens.
+ if (FTI.getExceptionSpecType() == EST_Unparsed) {
+ // Push this method onto the stack of late-parsed method
+ // declarations.
+ LateMethod = new LateParsedMethodDeclaration(this, ThisDecl);
+ getCurrentClass().LateParsedDeclarations.push_back(LateMethod);
+ LateMethod->TemplateScope = getCurScope()->isTemplateParamScope();
+
+ // Stash the exception-specification tokens in the late-parsed method.
+ LateMethod->ExceptionSpecTokens = FTI.ExceptionSpecTokens;
+ FTI.ExceptionSpecTokens = 0;
+
+ // Reserve space for the parameters.
+ LateMethod->DefaultArgs.reserve(FTI.NumParams);
+ }
+
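For reference, a sketch of what stashing the exception-specification tokens enables: like default arguments, the specification is parsed once the class is complete, so it can refer to members declared later.

    struct S {
      void f() noexcept(noexcept(g()));  // parsed late, so the call to g()
      void g() noexcept;                 // declared below still resolves
    };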
for (unsigned ParamIdx = 0; ParamIdx < FTI.NumParams; ++ParamIdx) {
if (LateMethod || FTI.Params[ParamIdx].DefaultArgTokens) {
if (!LateMethod) {
@@ -1879,12 +1968,22 @@ VirtSpecifiers::Specifier Parser::isCXX11VirtSpecifier(const Token &Tok) const {
/// virt-specifier
/// virt-specifier-seq virt-specifier
void Parser::ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS,
- bool IsInterface) {
+ bool IsInterface,
+ SourceLocation FriendLoc) {
while (true) {
VirtSpecifiers::Specifier Specifier = isCXX11VirtSpecifier();
if (Specifier == VirtSpecifiers::VS_None)
return;
+ if (FriendLoc.isValid()) {
+ Diag(Tok.getLocation(), diag::err_friend_decl_spec)
+ << VirtSpecifiers::getSpecifierName(Specifier)
+ << FixItHint::CreateRemoval(Tok.getLocation())
+ << SourceRange(FriendLoc, FriendLoc);
+ ConsumeToken();
+ continue;
+ }
+
// C++ [class.mem]p8:
// A virt-specifier-seq shall contain at most one of each virt-specifier.
const char *PrevSpec = nullptr;
@@ -1929,13 +2028,19 @@ void Parser::ParseCXXMemberDeclaratorBeforeInitializer(
// identifier[opt] ':' constant-expression
if (Tok.isNot(tok::colon))
ParseDeclarator(DeclaratorInfo);
+ else
+ DeclaratorInfo.SetIdentifier(nullptr, Tok.getLocation());
if (!DeclaratorInfo.isFunctionDeclarator() && TryConsumeToken(tok::colon)) {
+ assert(DeclaratorInfo.isPastIdentifier() &&
+ "don't know where identifier would go yet?");
BitfieldSize = ParseConstantExpression();
if (BitfieldSize.isInvalid())
SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
} else
- ParseOptionalCXX11VirtSpecifierSeq(VS, getCurrentClass().IsInterface);
+ ParseOptionalCXX11VirtSpecifierSeq(
+ VS, getCurrentClass().IsInterface,
+ DeclaratorInfo.getDeclSpec().getFriendSpecLoc());
// If a simple-asm-expr is present, parse it.
if (Tok.is(tok::kw_asm)) {
@@ -1953,10 +2058,21 @@ void Parser::ParseCXXMemberDeclaratorBeforeInitializer(
// For compatibility with code written to older Clang, also accept a
// virt-specifier *after* the GNU attributes.
- // FIXME: If we saw any attributes that are known to GCC followed by a
- // virt-specifier, issue a GCC-compat warning.
- if (BitfieldSize.isUnset() && VS.isUnset())
- ParseOptionalCXX11VirtSpecifierSeq(VS, getCurrentClass().IsInterface);
+ if (BitfieldSize.isUnset() && VS.isUnset()) {
+ ParseOptionalCXX11VirtSpecifierSeq(
+ VS, getCurrentClass().IsInterface,
+ DeclaratorInfo.getDeclSpec().getFriendSpecLoc());
+ if (!VS.isUnset()) {
+ // If we saw any GNU-style attributes that are known to GCC followed by a
+ // virt-specifier, issue a GCC-compat warning.
+ const AttributeList *Attr = DeclaratorInfo.getAttributes();
+ while (Attr) {
+ if (Attr->isKnownToGCC() && !Attr->isCXX11Attribute())
+ Diag(Attr->getLoc(), diag::warn_gcc_attribute_location);
+ Attr = Attr->getNext();
+ }
+ }
+ }
}
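For reference, an illustration (illustrative only, not from the patch) of a declaration that now draws the GCC-compatibility warning: a known-to-GCC GNU attribute written between the declarator and a virt-specifier:

    struct B { virtual void f(); };
    struct D : B {
      void f() __attribute__((used)) override;  // accepted, but now warned as a
                                                // placement GCC would not allow
    };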
/// ParseCXXClassMemberDeclaration - Parse a C++ class member declaration.
@@ -2021,7 +2137,8 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Access declarations.
bool MalformedTypeSpec = false;
if (!TemplateInfo.Kind &&
- (Tok.is(tok::identifier) || Tok.is(tok::coloncolon))) {
+ (Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
+ Tok.is(tok::kw___super))) {
if (TryAnnotateCXXScopeToken())
MalformedTypeSpec = true;
@@ -2039,6 +2156,11 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
/*EnteringContext=*/false);
+ if (SS.isInvalid()) {
+ SkipUntil(tok::semi);
+ return;
+ }
+
// Try to parse an unqualified-id.
SourceLocation TemplateKWLoc;
UnqualifiedId Name;
@@ -2066,9 +2188,10 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
}
- // static_assert-declaration
- if (Tok.is(tok::kw_static_assert) || Tok.is(tok::kw__Static_assert)) {
- // FIXME: Check for templates
+ // static_assert-declaration. A templated static_assert declaration is
+ // diagnosed in Parser::ParseSingleDeclarationAfterTemplate.
+ if (!TemplateInfo.Kind &&
+ (Tok.is(tok::kw_static_assert) || Tok.is(tok::kw__Static_assert))) {
SourceLocation DeclEnd;
ParseStaticAssertDeclaration(DeclEnd);
return;
@@ -2309,7 +2432,6 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
E = Ranges.end(); I != E; ++I)
Diag((*I).getBegin(), diag::err_attributes_not_allowed) << *I;
- // TODO: handle initializers, VS, bitfields, 'delete'
ThisDecl = Actions.ActOnFriendFunctionDecl(getCurScope(), DeclaratorInfo,
TemplateParams);
} else {
@@ -2487,7 +2609,10 @@ ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
Diag(ConsumeToken(), diag::err_default_special_members);
return ExprError();
}
-
+ }
+ if (const auto *PD = dyn_cast_or_null<MSPropertyDecl>(D)) {
+ Diag(Tok, diag::err_ms_property_initializer) << PD;
+ return ExprError();
}
return ParseInitializer();
}
@@ -2581,17 +2706,60 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// and the only possible place for them to appertain
// to the class would be between class-key and class-name.
CheckMisplacedCXX11Attribute(Attrs, AttrFixitLoc);
+
+ // ParseClassSpecifier() does only a superficial check for attributes before
+ // deciding to call this method. For example, for
+ // `class C final alignas ([l) {` it will decide that this looks like a
+ // misplaced attribute since it sees `alignas '(' ')'`. But the actual
+ // attribute parsing code will try to parse the '[' as a constexpr lambda
+ // and consume enough tokens that the alignas parsing code will eat the
+ // opening '{'. So bail out if the next token isn't one we expect.
+ if (!Tok.is(tok::colon) && !Tok.is(tok::l_brace)) {
+ if (TagDecl)
+ Actions.ActOnTagDefinitionError(getCurScope(), TagDecl);
+ return;
+ }
}
if (Tok.is(tok::colon)) {
ParseBaseClause(TagDecl);
-
if (!Tok.is(tok::l_brace)) {
- Diag(Tok, diag::err_expected_lbrace_after_base_specifiers);
-
- if (TagDecl)
- Actions.ActOnTagDefinitionError(getCurScope(), TagDecl);
- return;
+ bool SuggestFixIt = false;
+ SourceLocation BraceLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ if (Tok.isAtStartOfLine()) {
+ switch (Tok.getKind()) {
+ case tok::kw_private:
+ case tok::kw_protected:
+ case tok::kw_public:
+ SuggestFixIt = NextToken().getKind() == tok::colon;
+ break;
+ case tok::kw_static_assert:
+ case tok::r_brace:
+ case tok::kw_using:
+ // base-clause can have simple-template-id; 'template' can't be there
+ case tok::kw_template:
+ SuggestFixIt = true;
+ break;
+ case tok::identifier:
+ SuggestFixIt = isConstructorDeclarator(true);
+ break;
+ default:
+ SuggestFixIt = isCXXSimpleDeclaration(/*AllowForRangeDecl=*/false);
+ break;
+ }
+ }
+ DiagnosticBuilder LBraceDiag =
+ Diag(BraceLoc, diag::err_expected_lbrace_after_base_specifiers);
+ if (SuggestFixIt) {
+ LBraceDiag << FixItHint::CreateInsertion(BraceLoc, " {");
+ // Try recovering from missing { after base-clause.
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::l_brace);
+ } else {
+ if (TagDecl)
+ Actions.ActOnTagDefinitionError(getCurScope(), TagDecl);
+ return;
+ }
}
}
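For reference, the kind of omission the new recovery path targets: when the token after the base-clause clearly starts a member-specification, the parser emits a fix-it inserting '{' and keeps going instead of giving up.

    struct Base {};
    struct Derived : Base   // '{' forgotten here; the fix-it suggests inserting " {"
    public:                 // an access-specifier at the start of the next line
      int member;           // is one of the cues used to attempt recovery
    };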
@@ -2725,7 +2893,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// C++11 [class.mem]p2:
// Within the class member-specification, the class is regarded as complete
- // within function bodies, default arguments, and
+ // within function bodies, default arguments, exception-specifications, and
// brace-or-equal-initializers for non-static data members (including such
// things in nested classes).
if (TagDecl && NonNestedClass) {
@@ -2854,7 +3022,7 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
/// [C++] mem-initializer-id:
/// '::'[opt] nested-name-specifier[opt] class-name
/// identifier
-Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
+MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
// parse '::'[opt] nested-name-specifier[opt]
CXXScopeSpec SS;
ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
@@ -2944,13 +3112,51 @@ Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
/// 'noexcept'
/// 'noexcept' '(' constant-expression ')'
ExceptionSpecificationType
-Parser::tryParseExceptionSpecification(
+Parser::tryParseExceptionSpecification(bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
- ExprResult &NoexceptExpr) {
+ ExprResult &NoexceptExpr,
+ CachedTokens *&ExceptionSpecTokens) {
ExceptionSpecificationType Result = EST_None;
-
+ ExceptionSpecTokens = 0;
+
+ // Handle delayed parsing of exception-specifications.
+ if (Delayed) {
+ if (Tok.isNot(tok::kw_throw) && Tok.isNot(tok::kw_noexcept))
+ return EST_None;
+
+ // Consume and cache the starting token.
+ bool IsNoexcept = Tok.is(tok::kw_noexcept);
+ Token StartTok = Tok;
+ SpecificationRange = SourceRange(ConsumeToken());
+
+ // Check for a '('.
+ if (!Tok.is(tok::l_paren)) {
+ // If this is a bare 'noexcept', we're done.
+ if (IsNoexcept) {
+ Diag(Tok, diag::warn_cxx98_compat_noexcept_decl);
+ NoexceptExpr = 0;
+ return EST_BasicNoexcept;
+ }
+
+ Diag(Tok, diag::err_expected_lparen_after) << "throw";
+ return EST_DynamicNone;
+ }
+
+ // Cache the tokens for the exception-specification.
+ ExceptionSpecTokens = new CachedTokens;
+ ExceptionSpecTokens->push_back(StartTok); // 'throw' or 'noexcept'
+ ExceptionSpecTokens->push_back(Tok); // '('
+ SpecificationRange.setEnd(ConsumeParen()); // '('
+
+ ConsumeAndStoreUntil(tok::r_paren, *ExceptionSpecTokens,
+ /*StopAtSemi=*/true,
+ /*ConsumeFinalToken=*/true);
+ SpecificationRange.setEnd(Tok.getLocation());
+ return EST_Unparsed;
+ }
+
// See if there's a dynamic specification.
if (Tok.is(tok::kw_throw)) {
Result = ParseDynamicExceptionSpecification(SpecificationRange,
@@ -3168,9 +3374,11 @@ IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
switch (Tok.getKind()) {
default:
// Identifiers and keywords have identifier info attached.
- if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
- Loc = ConsumeToken();
- return II;
+ if (!Tok.isAnnotation()) {
+ if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ Loc = ConsumeToken();
+ return II;
+ }
}
return nullptr;
@@ -3265,7 +3473,6 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
if (Attr->getMaxArgs() && !NumArgs) {
// The attribute was allowed to have arguments, but none were provided
// even though the attribute parsed successfully. This is an error.
- // FIXME: This is a good place for a fixit which removes the parens.
Diag(LParenLoc, diag::err_attribute_requires_arguments) << AttrName;
return false;
} else if (!Attr->getMaxArgs()) {
@@ -3273,7 +3480,8 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
// arguments. It doesn't matter whether any were provided -- the
// presence of the argument list (even if empty) is diagnosed.
Diag(LParenLoc, diag::err_cxx11_attribute_forbids_arguments)
- << AttrName;
+ << AttrName
+ << FixItHint::CreateRemoval(SourceRange(LParenLoc, *EndLoc));
return false;
}
}
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
index 0e4dfb91ad5d..d0d97de84c10 100644
--- a/lib/Parse/ParseExpr.cpp
+++ b/lib/Parse/ParseExpr.cpp
@@ -216,6 +216,13 @@ bool Parser::isNotExpressionStart() {
return isKnownToBeDeclarationSpecifier();
}
+static bool isFoldOperator(prec::Level Level) {
+ return Level > prec::Unknown && Level != prec::Conditional;
+}
+static bool isFoldOperator(tok::TokenKind Kind) {
+ return isFoldOperator(getBinOpPrecedence(Kind, false, true));
+}
+
/// \brief Parse a binary expression that starts with \p LHS and has a
/// precedence of at least \p MinPrec.
ExprResult
@@ -247,6 +254,16 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
return LHS;
}
+ // If the next token is an ellipsis, then this is a fold-expression. Leave
+ // it alone so we can handle it in the paren expression.
+ if (isFoldOperator(NextTokPrec) && Tok.is(tok::ellipsis)) {
+ // FIXME: We can't check this via lookahead before we consume the token
+ // because that tickles a lexer bug.
+ PP.EnterToken(Tok);
+ Tok = OpToken;
+ return LHS;
+ }
+
// Special case handling for the ternary operator.
ExprResult TernaryMiddle(true);
if (NextTokPrec == prec::Conditional) {
@@ -260,6 +277,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// 'logical-OR-expression' as we might expect.
TernaryMiddle = ParseExpression();
if (TernaryMiddle.isInvalid()) {
+ Actions.CorrectDelayedTyposInExpr(LHS);
LHS = ExprError();
TernaryMiddle = nullptr;
}
@@ -328,9 +346,11 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
else
RHS = ParseCastExpression(false);
- if (RHS.isInvalid())
+ if (RHS.isInvalid()) {
+ Actions.CorrectDelayedTyposInExpr(LHS);
LHS = ExprError();
-
+ }
+
// Remember the precedence of this operator and get the precedence of the
// operator immediately to the right of the RHS.
prec::Level ThisPrec = NextTokPrec;
@@ -359,13 +379,14 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
static_cast<prec::Level>(ThisPrec + !isRightAssoc));
RHSIsInitList = false;
- if (RHS.isInvalid())
+ if (RHS.isInvalid()) {
+ Actions.CorrectDelayedTyposInExpr(LHS);
LHS = ExprError();
+ }
NextTokPrec = getBinOpPrecedence(Tok.getKind(), GreaterThanIsOperator,
getLangOpts().CPlusPlus11);
}
- assert(NextTokPrec <= ThisPrec && "Recursion didn't work!");
if (!RHS.isInvalid() && RHSIsInitList) {
if (ThisPrec == prec::Assignment) {
@@ -397,7 +418,9 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
LHS = Actions.ActOnConditionalOp(OpToken.getLocation(), ColonLoc,
LHS.get(), TernaryMiddle.get(),
RHS.get());
- }
+ } else
+ // Ensure potential typos in the RHS aren't left undiagnosed.
+ Actions.CorrectDelayedTyposInExpr(RHS);
}
}
@@ -425,7 +448,7 @@ class CastExpressionIdValidator : public CorrectionCandidateCallback {
public:
CastExpressionIdValidator(bool AllowTypes, bool AllowNonTypes)
: AllowNonTypes(AllowNonTypes) {
- WantTypeSpecifiers = AllowTypes;
+ WantTypeSpecifiers = WantFunctionLikeCasts = AllowTypes;
}
bool ValidateCandidate(const TypoCorrection &candidate) override {
@@ -688,11 +711,12 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
ConsumeToken();
break;
+ case tok::kw___super:
case tok::kw_decltype:
// Annotate the token and tail recurse.
if (TryAnnotateTypeOrScopeToken())
return ExprError();
- assert(Tok.isNot(tok::kw_decltype));
+ assert(Tok.isNot(tok::kw_decltype) && Tok.isNot(tok::kw___super));
return ParseCastExpression(isUnaryExpression, isAddressOfOperand);
case tok::identifier: { // primary-expression: identifier
@@ -708,11 +732,81 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// If this identifier was reverted from a token ID, and the next token
// is a parenthesis, this is likely to be a use of a type trait. Check
// those tokens.
- if (Next.is(tok::l_paren) && Tok.is(tok::identifier) &&
- Tok.getIdentifierInfo()->hasRevertedTokenIDToIdentifier() &&
- TryIdentKeywordUpgrade())
- return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
- NotCastExpr, isTypeCast);
+ if (Next.is(tok::l_paren) &&
+ Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo()->hasRevertedTokenIDToIdentifier()) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ // Build up the mapping of revertible type traits, for future use.
+ if (RevertibleTypeTraits.empty()) {
+#define RTT_JOIN(X,Y) X##Y
+#define REVERTIBLE_TYPE_TRAIT(Name) \
+ RevertibleTypeTraits[PP.getIdentifierInfo(#Name)] \
+ = RTT_JOIN(tok::kw_,Name)
+
+ REVERTIBLE_TYPE_TRAIT(__is_abstract);
+ REVERTIBLE_TYPE_TRAIT(__is_arithmetic);
+ REVERTIBLE_TYPE_TRAIT(__is_array);
+ REVERTIBLE_TYPE_TRAIT(__is_base_of);
+ REVERTIBLE_TYPE_TRAIT(__is_class);
+ REVERTIBLE_TYPE_TRAIT(__is_complete_type);
+ REVERTIBLE_TYPE_TRAIT(__is_compound);
+ REVERTIBLE_TYPE_TRAIT(__is_const);
+ REVERTIBLE_TYPE_TRAIT(__is_constructible);
+ REVERTIBLE_TYPE_TRAIT(__is_convertible);
+ REVERTIBLE_TYPE_TRAIT(__is_convertible_to);
+ REVERTIBLE_TYPE_TRAIT(__is_destructible);
+ REVERTIBLE_TYPE_TRAIT(__is_empty);
+ REVERTIBLE_TYPE_TRAIT(__is_enum);
+ REVERTIBLE_TYPE_TRAIT(__is_floating_point);
+ REVERTIBLE_TYPE_TRAIT(__is_final);
+ REVERTIBLE_TYPE_TRAIT(__is_function);
+ REVERTIBLE_TYPE_TRAIT(__is_fundamental);
+ REVERTIBLE_TYPE_TRAIT(__is_integral);
+ REVERTIBLE_TYPE_TRAIT(__is_interface_class);
+ REVERTIBLE_TYPE_TRAIT(__is_literal);
+ REVERTIBLE_TYPE_TRAIT(__is_lvalue_expr);
+ REVERTIBLE_TYPE_TRAIT(__is_lvalue_reference);
+ REVERTIBLE_TYPE_TRAIT(__is_member_function_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_member_object_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_member_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_nothrow_assignable);
+ REVERTIBLE_TYPE_TRAIT(__is_nothrow_constructible);
+ REVERTIBLE_TYPE_TRAIT(__is_nothrow_destructible);
+ REVERTIBLE_TYPE_TRAIT(__is_object);
+ REVERTIBLE_TYPE_TRAIT(__is_pod);
+ REVERTIBLE_TYPE_TRAIT(__is_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_polymorphic);
+ REVERTIBLE_TYPE_TRAIT(__is_reference);
+ REVERTIBLE_TYPE_TRAIT(__is_rvalue_expr);
+ REVERTIBLE_TYPE_TRAIT(__is_rvalue_reference);
+ REVERTIBLE_TYPE_TRAIT(__is_same);
+ REVERTIBLE_TYPE_TRAIT(__is_scalar);
+ REVERTIBLE_TYPE_TRAIT(__is_sealed);
+ REVERTIBLE_TYPE_TRAIT(__is_signed);
+ REVERTIBLE_TYPE_TRAIT(__is_standard_layout);
+ REVERTIBLE_TYPE_TRAIT(__is_trivial);
+ REVERTIBLE_TYPE_TRAIT(__is_trivially_assignable);
+ REVERTIBLE_TYPE_TRAIT(__is_trivially_constructible);
+ REVERTIBLE_TYPE_TRAIT(__is_trivially_copyable);
+ REVERTIBLE_TYPE_TRAIT(__is_union);
+ REVERTIBLE_TYPE_TRAIT(__is_unsigned);
+ REVERTIBLE_TYPE_TRAIT(__is_void);
+ REVERTIBLE_TYPE_TRAIT(__is_volatile);
+#undef REVERTIBLE_TYPE_TRAIT
+#undef RTT_JOIN
+ }
+
+ // If we find that this is in fact the name of a type trait,
+ // update the token kind in place and parse again to treat it as
+ // the appropriate kind of type trait.
+ llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind>::iterator Known
+ = RevertibleTypeTraits.find(II);
+ if (Known != RevertibleTypeTraits.end()) {
+ Tok.setKind(Known->second);
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+ NotCastExpr, isTypeCast);
+ }
+ }
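For reference, a sketch of the interaction handled here: once a trait name has been demoted to an identifier for library compatibility, a later use followed by '(' is promoted back so the builtin trait still works.

    struct __is_pointer;            // demotes '__is_pointer' to an identifier
    bool b = __is_pointer(int *);   // '(' after the reverted name switches the
                                    // token back to the type-trait keyword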
if (Next.is(tok::coloncolon) ||
(!ColonIsSacred && Next.is(tok::colon)) ||
@@ -812,17 +906,26 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
UnqualifiedId Name;
CXXScopeSpec ScopeSpec;
SourceLocation TemplateKWLoc;
- CastExpressionIdValidator Validator(isTypeCast != NotTypeCast,
- isTypeCast != IsTypeCast);
- Validator.IsAddressOfOperand = isAddressOfOperand;
+ Token Replacement;
+ auto Validator = llvm::make_unique<CastExpressionIdValidator>(
+ isTypeCast != NotTypeCast, isTypeCast != IsTypeCast);
+ Validator->IsAddressOfOperand = isAddressOfOperand;
+ Validator->WantRemainingKeywords = Tok.isNot(tok::r_paren);
Name.setIdentifier(&II, ILoc);
- Res = Actions.ActOnIdExpression(getCurScope(), ScopeSpec, TemplateKWLoc,
- Name, Tok.is(tok::l_paren),
- isAddressOfOperand, &Validator);
+ Res = Actions.ActOnIdExpression(
+ getCurScope(), ScopeSpec, TemplateKWLoc, Name, Tok.is(tok::l_paren),
+ isAddressOfOperand, std::move(Validator),
+ /*IsInlineAsmIdentifier=*/false, &Replacement);
+ if (!Res.isInvalid() && !Res.get()) {
+ UnconsumeToken(Replacement);
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+ NotCastExpr, isTypeCast);
+ }
break;
}
case tok::char_constant: // constant: character-constant
case tok::wide_char_constant:
+ case tok::utf8_char_constant:
case tok::utf16_char_constant:
case tok::utf32_char_constant:
Res = Actions.ActOnCharacterConstant(Tok, /*UDLScope*/getCurScope());
@@ -1261,8 +1364,12 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (!LHS.isInvalid() && !Idx.isInvalid() && Tok.is(tok::r_square)) {
LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), LHS.get(), Loc,
Idx.get(), RLoc);
- } else
+ } else {
+ (void)Actions.CorrectDelayedTyposInExpr(LHS);
+ (void)Actions.CorrectDelayedTyposInExpr(Idx);
LHS = ExprError();
+ Idx = ExprError();
+ }
// Match the ']'.
T.consumeClose();
@@ -1285,6 +1392,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
SourceLocation OpenLoc = ConsumeToken();
if (ParseSimpleExpressionList(ExecConfigExprs, ExecConfigCommaLocs)) {
+ (void)Actions.CorrectDelayedTyposInExpr(LHS);
LHS = ExprError();
}
@@ -1335,6 +1443,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (Tok.isNot(tok::r_paren)) {
if (ParseExpressionList(ArgExprs, CommaLocs, &Sema::CodeCompleteCall,
LHS.get())) {
+ (void)Actions.CorrectDelayedTyposInExpr(LHS);
LHS = ExprError();
}
}
@@ -1433,8 +1542,10 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
/*AllowDestructorName=*/true,
/*AllowConstructorName=*/
getLangOpts().MicrosoftExt,
- ObjectType, TemplateKWLoc, Name))
+ ObjectType, TemplateKWLoc, Name)) {
+ (void)Actions.CorrectDelayedTyposInExpr(LHS);
LHS = ExprError();
+ }
if (!LHS.isInvalid())
LHS = Actions.ActOnMemberAccessExpr(getCurScope(), LHS.get(), OpLoc,
@@ -1918,11 +2029,15 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
/// cast-expression: [C99 6.5.4]
/// '(' type-name ')' cast-expression
/// [ARC] bridged-cast-expression
-///
/// [ARC] bridged-cast-expression:
/// (__bridge type-name) cast-expression
/// (__bridge_transfer type-name) cast-expression
/// (__bridge_retained type-name) cast-expression
+/// fold-expression: [C++1z]
+/// '(' cast-expression fold-operator '...' ')'
+/// '(' '...' fold-operator cast-expression ')'
+/// '(' cast-expression fold-operator '...'
+/// fold-operator cast-expression ')'
/// \endverbatim
ExprResult
Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
@@ -1969,16 +2084,21 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// unless they've already reported an error.
if (ExprType >= CompoundStmt && Tok.is(tok::l_brace)) {
Diag(Tok, diag::ext_gnu_statement_expr);
- Actions.ActOnStartStmtExpr();
- StmtResult Stmt(ParseCompoundStatement(true));
- ExprType = CompoundStmt;
-
- // If the substmt parsed correctly, build the AST node.
- if (!Stmt.isInvalid()) {
- Result = Actions.ActOnStmtExpr(OpenLoc, Stmt.get(), Tok.getLocation());
+ if (!getCurScope()->getFnParent() && !getCurScope()->getBlockParent()) {
+ Result = ExprError(Diag(OpenLoc, diag::err_stmtexpr_file_scope));
} else {
- Actions.ActOnStmtExprError();
+ Actions.ActOnStartStmtExpr();
+
+ StmtResult Stmt(ParseCompoundStatement(true));
+ ExprType = CompoundStmt;
+
+ // If the substmt parsed correctly, build the AST node.
+ if (!Stmt.isInvalid()) {
+ Result = Actions.ActOnStmtExpr(OpenLoc, Stmt.get(), Tok.getLocation());
+ } else {
+ Actions.ActOnStmtExprError();
+ }
}
} else if (ExprType >= CompoundLiteral && BridgeCast) {
tok::TokenKind tokenKind = Tok.getKind();
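For reference, a sketch of the GNU statement-expression forms affected by the new file-scope check:

    int ok() { return ({ int x = 1; x; }); }  // still accepted inside a function
    int bad = ({ int y = 2; y; });            // at file scope: err_stmtexpr_file_scope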
@@ -2111,24 +2231,36 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
Diag(Tok, diag::err_expected_lbrace_in_compound_literal);
return ExprError();
}
+ } else if (Tok.is(tok::ellipsis) &&
+ isFoldOperator(NextToken().getKind())) {
+ return ParseFoldExpression(ExprResult(), T);
} else if (isTypeCast) {
// Parse the expression-list.
InMessageExpressionRAIIObject InMessage(*this, false);
-
+
ExprVector ArgExprs;
CommaLocsTy CommaLocs;
if (!ParseSimpleExpressionList(ArgExprs, CommaLocs)) {
+ // FIXME: If we ever support comma expressions as operands to
+ // fold-expressions, we'll need to allow multiple ArgExprs here.
+ if (ArgExprs.size() == 1 && isFoldOperator(Tok.getKind()) &&
+ NextToken().is(tok::ellipsis))
+ return ParseFoldExpression(Result, T);
+
ExprType = SimpleExpr;
Result = Actions.ActOnParenListExpr(OpenLoc, Tok.getLocation(),
ArgExprs);
}
} else {
InMessageExpressionRAIIObject InMessage(*this, false);
-
+
Result = ParseExpression(MaybeTypeCast);
ExprType = SimpleExpr;
+ if (isFoldOperator(Tok.getKind()) && NextToken().is(tok::ellipsis))
+ return ParseFoldExpression(Result, T);
+
// Don't build a paren expression unless we actually match a ')'.
if (!Result.isInvalid() && Tok.is(tok::r_paren))
Result =
@@ -2286,6 +2418,59 @@ ExprResult Parser::ParseGenericSelectionExpression() {
Types, Exprs);
}
+/// \brief Parse a C++1z fold-expression after the opening paren and optional
+/// left-hand-side expression.
+///
+/// \verbatim
+/// fold-expression:
+/// ( cast-expression fold-operator ... )
+/// ( ... fold-operator cast-expression )
+/// ( cast-expression fold-operator ... fold-operator cast-expression )
+/// \endverbatim
+ExprResult Parser::ParseFoldExpression(ExprResult LHS,
+ BalancedDelimiterTracker &T) {
+ if (LHS.isInvalid()) {
+ T.skipToEnd();
+ return true;
+ }
+
+ tok::TokenKind Kind = tok::unknown;
+ SourceLocation FirstOpLoc;
+ if (LHS.isUsable()) {
+ Kind = Tok.getKind();
+ assert(isFoldOperator(Kind) && "missing fold-operator");
+ FirstOpLoc = ConsumeToken();
+ }
+
+ assert(Tok.is(tok::ellipsis) && "not a fold-expression");
+ SourceLocation EllipsisLoc = ConsumeToken();
+
+ ExprResult RHS;
+ if (Tok.isNot(tok::r_paren)) {
+ if (!isFoldOperator(Tok.getKind()))
+ return Diag(Tok.getLocation(), diag::err_expected_fold_operator);
+
+ if (Kind != tok::unknown && Tok.getKind() != Kind)
+ Diag(Tok.getLocation(), diag::err_fold_operator_mismatch)
+ << SourceRange(FirstOpLoc);
+ Kind = Tok.getKind();
+ ConsumeToken();
+
+ RHS = ParseExpression();
+ if (RHS.isInvalid()) {
+ T.skipToEnd();
+ return true;
+ }
+ }
+
+ Diag(EllipsisLoc, getLangOpts().CPlusPlus1z
+ ? diag::warn_cxx14_compat_fold_expression
+ : diag::ext_fold_expression);
+
+ T.consumeClose();
+ return Actions.ActOnCXXFoldExpr(T.getOpenLocation(), LHS.get(), Kind,
+ EllipsisLoc, RHS.get(), T.getCloseLocation());
+}
+
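For reference, minimal C++1z fold-expressions covering the grammar forms ParseFoldExpression accepts (unary right, unary left, and binary folds):

    template <typename ...Ts> auto sum_r(Ts ...ts) { return (ts + ...); }
    template <typename ...Ts> auto sum_l(Ts ...ts) { return (... + ts); }
    template <typename ...Ts> auto sum_1(Ts ...ts) { return (1 + ... + ts); }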
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
///
/// \verbatim
@@ -2342,10 +2527,19 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr*> &Exprs,
}
if (Tok.isNot(tok::comma))
- return SawError;
+ break;
// Move to the next argument, remember where the comma was.
CommaLocs.push_back(ConsumeToken());
}
+ if (SawError) {
+ // Ensure typos get diagnosed when errors were encountered while parsing the
+ // expression list.
+ for (auto &E : Exprs) {
+ ExprResult Expr = Actions.CorrectDelayedTyposInExpr(E);
+ if (Expr.isUsable()) E = Expr.get();
+ }
+ }
+ return SawError;
}
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
@@ -2477,6 +2671,7 @@ ExprResult Parser::ParseBlockLiteralExpression() {
/*RefQualifierLoc=*/NoLoc,
/*ConstQualifierLoc=*/NoLoc,
/*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc,
/*MutableLoc=*/NoLoc,
EST_None,
/*ESpecLoc=*/NoLoc,
@@ -2484,6 +2679,7 @@ ExprResult Parser::ParseBlockLiteralExpression() {
/*ExceptionRanges=*/nullptr,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
+ /*ExceptionSpecTokens=*/nullptr,
CaretLoc, CaretLoc,
ParamInfo),
attrs, CaretLoc);
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
index 83121a8b1a2d..355503caa9be 100644
--- a/lib/Parse/ParseExprCXX.cpp
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -108,35 +108,37 @@ void Parser::CheckForLParenAfterColonColon() {
if (!Tok.is(tok::l_paren))
return;
- SourceLocation l_parenLoc = ConsumeParen(), r_parenLoc;
- Token Tok1 = getCurToken();
- if (!Tok1.is(tok::identifier) && !Tok1.is(tok::star))
+ Token LParen = Tok;
+ Token NextTok = GetLookAheadToken(1);
+ Token StarTok = NextTok;
+ // Check for (identifier or (*identifier
+ Token IdentifierTok = StarTok.is(tok::star) ? GetLookAheadToken(2) : StarTok;
+ if (IdentifierTok.isNot(tok::identifier))
return;
-
- if (Tok1.is(tok::identifier)) {
- Token Tok2 = GetLookAheadToken(1);
- if (Tok2.is(tok::r_paren)) {
+ // Eat the '('.
+ ConsumeParen();
+ Token RParen;
+ // Do we have a ')' ?
+ NextTok = StarTok.is(tok::star) ? GetLookAheadToken(2) : GetLookAheadToken(1);
+ if (NextTok.is(tok::r_paren)) {
+ RParen = NextTok;
+ // Eat the '*' if it is present.
+ if (StarTok.is(tok::star))
ConsumeToken();
- PP.EnterToken(Tok1);
- r_parenLoc = ConsumeParen();
- }
- } else if (Tok1.is(tok::star)) {
- Token Tok2 = GetLookAheadToken(1);
- if (Tok2.is(tok::identifier)) {
- Token Tok3 = GetLookAheadToken(2);
- if (Tok3.is(tok::r_paren)) {
- ConsumeToken();
- ConsumeToken();
- PP.EnterToken(Tok2);
- PP.EnterToken(Tok1);
- r_parenLoc = ConsumeParen();
- }
- }
- }
-
- Diag(l_parenLoc, diag::err_paren_after_colon_colon)
- << FixItHint::CreateRemoval(l_parenLoc)
- << FixItHint::CreateRemoval(r_parenLoc);
+ // Eat the identifier.
+ ConsumeToken();
+ // Add the identifier token back.
+ PP.EnterToken(IdentifierTok);
+ // Add the '*' back if it was present.
+ if (StarTok.is(tok::star))
+ PP.EnterToken(StarTok);
+ // Eat the ')'.
+ ConsumeParen();
+ }
+
+ Diag(LParen.getLocation(), diag::err_paren_after_colon_colon)
+ << FixItHint::CreateRemoval(LParen.getLocation())
+ << FixItHint::CreateRemoval(RParen.getLocation());
}
/// \brief Parse global scope or nested-name-specifier if present.
@@ -217,13 +219,29 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
if (NextKind == tok::kw_new || NextKind == tok::kw_delete)
return false;
- // '::' - Global scope qualifier.
- if (Actions.ActOnCXXGlobalScopeSpecifier(getCurScope(), ConsumeToken(), SS))
- return true;
+ if (NextKind == tok::l_brace) {
+ // It is invalid to have :: {, consume the scope qualifier and pretend
+ // like we never saw it.
+ Diag(ConsumeToken(), diag::err_expected) << tok::identifier;
+ } else {
+ // '::' - Global scope qualifier.
+ if (Actions.ActOnCXXGlobalScopeSpecifier(ConsumeToken(), SS))
+ return true;
- CheckForLParenAfterColonColon();
+ CheckForLParenAfterColonColon();
- HasScopeSpecifier = true;
+ HasScopeSpecifier = true;
+ }
+ }
+
+ if (Tok.is(tok::kw___super)) {
+ SourceLocation SuperLoc = ConsumeToken();
+ if (!Tok.is(tok::coloncolon)) {
+ Diag(Tok.getLocation(), diag::err_expected_coloncolon_after_super);
+ return true;
+ }
+
+ return Actions.ActOnSuperScopeSpecifier(SuperLoc, ConsumeToken(), SS);
}
bool CheckForDestructor = false;
@@ -232,7 +250,8 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
*MayBePseudoDestructor = false;
}
- if (Tok.is(tok::kw_decltype) || Tok.is(tok::annot_decltype)) {
+ if (!HasScopeSpecifier &&
+ (Tok.is(tok::kw_decltype) || Tok.is(tok::annot_decltype))) {
DeclSpec DS(AttrFactory);
SourceLocation DeclLoc = Tok.getLocation();
SourceLocation EndLoc = ParseDecltypeSpecifier(DS);
@@ -423,11 +442,22 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
Next.setKind(tok::coloncolon);
}
}
-
+
+ if (Next.is(tok::coloncolon) && GetLookAheadToken(2).is(tok::l_brace)) {
+ // It is invalid to have :: {, consume the scope qualifier and pretend
+ // like we never saw it.
+ Token Identifier = Tok; // Stash away the identifier.
+ ConsumeToken(); // Eat the identifier, current token is now '::'.
+ Diag(PP.getLocForEndOfToken(ConsumeToken()), diag::err_expected)
+ << tok::identifier;
+ UnconsumeToken(Identifier); // Stick the identifier back.
+ Next = NextToken(); // Point Next at the '{' token.
+ }
+
if (Next.is(tok::coloncolon)) {
if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde) &&
- !Actions.isNonTypeNestedNameSpecifier(getCurScope(), SS, Tok.getLocation(),
- II, ObjectType)) {
+ !Actions.isNonTypeNestedNameSpecifier(
+ getCurScope(), SS, Tok.getLocation(), II, ObjectType)) {
*MayBePseudoDestructor = true;
return false;
}
@@ -556,6 +586,28 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
return false;
}
+ExprResult Parser::tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
+ Token &Replacement) {
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ if (ParseUnqualifiedId(SS,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/false,
+ /*AllowConstructorName=*/false,
+ /*ObjectType=*/ParsedType(), TemplateKWLoc, Name))
+ return ExprError();
+
+ // This is only the direct operand of an & operator if it is not
+ // followed by a postfix-expression suffix.
+ if (isAddressOfOperand && isPostfixExpressionSuffixStart())
+ isAddressOfOperand = false;
+
+ return Actions.ActOnIdExpression(getCurScope(), SS, TemplateKWLoc, Name,
+ Tok.is(tok::l_paren), isAddressOfOperand,
+ nullptr, /*IsInlineAsmIdentifier=*/false,
+ &Replacement);
+}
+
/// ParseCXXIdExpression - Handle id-expression.
///
/// id-expression:
@@ -606,24 +658,17 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
CXXScopeSpec SS;
ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
- SourceLocation TemplateKWLoc;
- UnqualifiedId Name;
- if (ParseUnqualifiedId(SS,
- /*EnteringContext=*/false,
- /*AllowDestructorName=*/false,
- /*AllowConstructorName=*/false,
- /*ObjectType=*/ ParsedType(),
- TemplateKWLoc,
- Name))
- return ExprError();
-
- // This is only the direct operand of an & operator if it is not
- // followed by a postfix-expression suffix.
- if (isAddressOfOperand && isPostfixExpressionSuffixStart())
- isAddressOfOperand = false;
-
- return Actions.ActOnIdExpression(getCurScope(), SS, TemplateKWLoc, Name,
- Tok.is(tok::l_paren), isAddressOfOperand);
+ Token Replacement;
+ ExprResult Result = tryParseCXXIdExpression(SS, isAddressOfOperand, Replacement);
+ if (Result.isUnset()) {
+ // If the ExprResult is valid but null, then typo correction suggested a
+ // keyword replacement that needs to be reparsed.
+ UnconsumeToken(Replacement);
+ Result = tryParseCXXIdExpression(SS, isAddressOfOperand, Replacement);
+ }
+ assert(!Result.isUnset() && "Typo correction suggested a keyword replacement "
+ "for a previous keyword suggestion");
+ return Result;
}
/// ParseLambdaExpression - Parse a C++11 lambda expression.
@@ -1007,6 +1052,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
Actions.PushLambdaScope();
+ TypeResult TrailingReturnType;
if (Tok.is(tok::l_paren)) {
ParseScope PrototypeScope(this,
Scope::FunctionPrototypeScope |
@@ -1050,10 +1096,13 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
SmallVector<ParsedType, 2> DynamicExceptions;
SmallVector<SourceRange, 2> DynamicExceptionRanges;
ExprResult NoexceptExpr;
- ESpecType = tryParseExceptionSpecification(ESpecRange,
+ CachedTokens *ExceptionSpecTokens;
+ ESpecType = tryParseExceptionSpecification(/*Delayed=*/false,
+ ESpecRange,
DynamicExceptions,
DynamicExceptionRanges,
- NoexceptExpr);
+ NoexceptExpr,
+ ExceptionSpecTokens);
if (ESpecType != EST_None)
DeclEndLoc = ESpecRange.getEnd();
@@ -1064,7 +1113,6 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
SourceLocation FunLocalRangeEnd = DeclEndLoc;
// Parse trailing-return-type[opt].
- TypeResult TrailingReturnType;
if (Tok.is(tok::arrow)) {
FunLocalRangeEnd = Tok.getLocation();
SourceRange Range;
@@ -1086,6 +1134,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
/*RefQualifierLoc=*/NoLoc,
/*ConstQualifierLoc=*/NoLoc,
/*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc,
MutableLoc,
ESpecType, ESpecRange.getBegin(),
DynamicExceptions.data(),
@@ -1093,6 +1142,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
DynamicExceptions.size(),
NoexceptExpr.isUsable() ?
NoexceptExpr.get() : nullptr,
+ /*ExceptionSpecTokens*/nullptr,
LParenLoc, FunLocalRangeEnd, D,
TrailingReturnType),
Attr, DeclEndLoc);
@@ -1132,12 +1182,11 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
MaybeParseCXX11Attributes(Attr, &DeclEndLoc);
// Parse the return type, if there is one.
- TypeResult TrailingReturnType;
if (Tok.is(tok::arrow)) {
SourceRange Range;
TrailingReturnType = ParseTrailingReturnType(Range);
if (Range.getEnd().isValid())
- DeclEndLoc = Range.getEnd();
+ DeclEndLoc = Range.getEnd();
}
SourceLocation NoLoc;
@@ -1153,6 +1202,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
/*RefQualifierLoc=*/NoLoc,
/*ConstQualifierLoc=*/NoLoc,
/*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc,
MutableLoc,
EST_None,
/*ESpecLoc=*/NoLoc,
@@ -1160,6 +1210,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
/*ExceptionRanges=*/nullptr,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
+ /*ExceptionSpecTokens=*/nullptr,
DeclLoc, DeclEndLoc, D,
TrailingReturnType),
Attr, DeclEndLoc);
@@ -1183,9 +1234,9 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
StmtResult Stmt(ParseCompoundStatementBody());
BodyScope.Exit();
- if (!Stmt.isInvalid())
+ if (!Stmt.isInvalid() && !TrailingReturnType.isInvalid())
return Actions.ActOnLambdaExpr(LambdaBeginLoc, Stmt.get(), getCurScope());
-
+
Actions.ActOnLambdaError(LambdaBeginLoc, getCurScope());
return ExprError();
}
@@ -1389,7 +1440,7 @@ ExprResult Parser::ParseCXXUuidof() {
/// ::[opt] nested-name-specifier[opt] ~type-name
///
ExprResult
-Parser::ParseCXXPseudoDestructor(ExprArg Base, SourceLocation OpLoc,
+Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType) {
@@ -2452,10 +2503,29 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
return true;
}
+ // If the user wrote ~T::T, correct it to T::~T.
+ if (!TemplateSpecified && NextToken().is(tok::coloncolon)) {
+ if (SS.isSet()) {
+ AnnotateScopeToken(SS, /*NewAnnotation*/true);
+ SS.clear();
+ }
+ if (ParseOptionalCXXScopeSpecifier(SS, ObjectType, EnteringContext))
+ return true;
+ if (Tok.isNot(tok::identifier) || NextToken().is(tok::coloncolon)) {
+ Diag(TildeLoc, diag::err_destructor_tilde_scope);
+ return true;
+ }
+
+ // Recover as if the tilde had been written before the identifier.
+ Diag(TildeLoc, diag::err_destructor_tilde_scope)
+ << FixItHint::CreateRemoval(TildeLoc)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "~");
+ }
+
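For reference, the destructor-name typo the new recovery handles; the fix-it moves the '~' to its correct position:

    struct T { };
    void destroy(T *p) {
      p->~T::T();   // written '~T::T'; diagnosed and recovered as if the
    }               // intended 'p->T::~T()' had been written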
// Parse the class-name (or template-name in a simple-template-id).
IdentifierInfo *ClassName = Tok.getIdentifierInfo();
SourceLocation ClassNameLoc = ConsumeToken();
-
+
if (TemplateSpecified || Tok.is(tok::less)) {
Result.setDestructorName(TildeLoc, ParsedType(), ClassNameLoc);
return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc,
@@ -2463,7 +2533,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
EnteringContext, ObjectType,
Result, TemplateSpecified);
}
-
+
// Note that this is a destructor name.
ParsedType Ty = Actions.getDestructorName(TildeLoc, *ClassName,
ClassNameLoc, getCurScope(),
diff --git a/lib/Parse/ParseInit.cpp b/lib/Parse/ParseInit.cpp
index 8536420088f3..7fe9862d3992 100644
--- a/lib/Parse/ParseInit.cpp
+++ b/lib/Parse/ParseInit.cpp
@@ -423,9 +423,11 @@ ExprResult Parser::ParseBraceInitializer() {
if (Tok.is(tok::ellipsis))
SubElt = Actions.ActOnPackExpansion(SubElt.get(), ConsumeToken());
-
+
+ SubElt = Actions.CorrectDelayedTyposInExpr(SubElt.get());
+
// If we couldn't parse the subelement, bail out.
- if (!SubElt.isInvalid()) {
+ if (SubElt.isUsable()) {
InitExprs.push_back(SubElt.get());
} else {
InitExprsOk = false;
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index 7fe72ec0c3b1..a597a1658cf6 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -79,7 +79,7 @@ Parser::DeclGroupPtrTy Parser::ParseObjCAtDirectives() {
SingleDecl = ParseObjCPropertyDynamic(AtLoc);
break;
case tok::objc_import:
- if (getLangOpts().Modules)
+ if (getLangOpts().Modules || getLangOpts().DebuggerSupport)
return ParseModuleImport(AtLoc);
Diag(AtLoc, diag::err_atimport);
SkipUntil(tok::semi);
@@ -307,72 +307,6 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
return ClsType;
}
-/// The Objective-C property callback. This should be defined where
-/// it's used, but instead it's been lifted to here to support VS2005.
-struct Parser::ObjCPropertyCallback : FieldCallback {
-private:
- virtual void anchor();
-public:
- Parser &P;
- SmallVectorImpl<Decl *> &Props;
- ObjCDeclSpec &OCDS;
- SourceLocation AtLoc;
- SourceLocation LParenLoc;
- tok::ObjCKeywordKind MethodImplKind;
-
- ObjCPropertyCallback(Parser &P,
- SmallVectorImpl<Decl *> &Props,
- ObjCDeclSpec &OCDS, SourceLocation AtLoc,
- SourceLocation LParenLoc,
- tok::ObjCKeywordKind MethodImplKind) :
- P(P), Props(Props), OCDS(OCDS), AtLoc(AtLoc), LParenLoc(LParenLoc),
- MethodImplKind(MethodImplKind) {
- }
-
- void invoke(ParsingFieldDeclarator &FD) override {
- if (FD.D.getIdentifier() == nullptr) {
- P.Diag(AtLoc, diag::err_objc_property_requires_field_name)
- << FD.D.getSourceRange();
- return;
- }
- if (FD.BitfieldSize) {
- P.Diag(AtLoc, diag::err_objc_property_bitfield)
- << FD.D.getSourceRange();
- return;
- }
-
- // Install the property declarator into interfaceDecl.
- IdentifierInfo *SelName =
- OCDS.getGetterName() ? OCDS.getGetterName() : FD.D.getIdentifier();
-
- Selector GetterSel =
- P.PP.getSelectorTable().getNullarySelector(SelName);
- IdentifierInfo *SetterName = OCDS.getSetterName();
- Selector SetterSel;
- if (SetterName)
- SetterSel = P.PP.getSelectorTable().getSelector(1, &SetterName);
- else
- SetterSel =
- SelectorTable::constructSetterSelector(P.PP.getIdentifierTable(),
- P.PP.getSelectorTable(),
- FD.D.getIdentifier());
- bool isOverridingProperty = false;
- Decl *Property =
- P.Actions.ActOnProperty(P.getCurScope(), AtLoc, LParenLoc,
- FD, OCDS,
- GetterSel, SetterSel,
- &isOverridingProperty,
- MethodImplKind);
- if (!isOverridingProperty)
- Props.push_back(Property);
-
- FD.complete(Property);
- }
-};
-
-void Parser::ObjCPropertyCallback::anchor() {
-}
-
/// objc-interface-decl-list:
/// empty
/// objc-interface-decl-list objc-property-decl [OBJC2]
@@ -511,12 +445,44 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
ParseObjCPropertyAttribute(OCDS);
}
- ObjCPropertyCallback Callback(*this, allProperties,
- OCDS, AtLoc, LParenLoc, MethodImplKind);
+ auto ObjCPropertyCallback = [&](ParsingFieldDeclarator &FD) {
+ if (FD.D.getIdentifier() == nullptr) {
+ Diag(AtLoc, diag::err_objc_property_requires_field_name)
+ << FD.D.getSourceRange();
+ return;
+ }
+ if (FD.BitfieldSize) {
+ Diag(AtLoc, diag::err_objc_property_bitfield)
+ << FD.D.getSourceRange();
+ return;
+ }
+
+ // Install the property declarator into interfaceDecl.
+ IdentifierInfo *SelName =
+ OCDS.getGetterName() ? OCDS.getGetterName() : FD.D.getIdentifier();
+
+ Selector GetterSel = PP.getSelectorTable().getNullarySelector(SelName);
+ IdentifierInfo *SetterName = OCDS.getSetterName();
+ Selector SetterSel;
+ if (SetterName)
+ SetterSel = PP.getSelectorTable().getSelector(1, &SetterName);
+ else
+ SetterSel = SelectorTable::constructSetterSelector(
+ PP.getIdentifierTable(), PP.getSelectorTable(),
+ FD.D.getIdentifier());
+ bool isOverridingProperty = false;
+ Decl *Property = Actions.ActOnProperty(
+ getCurScope(), AtLoc, LParenLoc, FD, OCDS, GetterSel, SetterSel,
+ &isOverridingProperty, MethodImplKind);
+ if (!isOverridingProperty)
+ allProperties.push_back(Property);
+
+ FD.complete(Property);
+ };
// Parse all the comma separated declarators.
ParsingDeclSpec DS(*this);
- ParseStructDeclaration(DS, Callback);
+ ParseStructDeclaration(DS, ObjCPropertyCallback);
ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list);
break;
@@ -1338,35 +1304,22 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
Sema::PCC_ObjCInstanceVariableList);
return cutOffParsing();
}
-
- struct ObjCIvarCallback : FieldCallback {
- Parser &P;
- Decl *IDecl;
- tok::ObjCKeywordKind visibility;
- SmallVectorImpl<Decl *> &AllIvarDecls;
-
- ObjCIvarCallback(Parser &P, Decl *IDecl, tok::ObjCKeywordKind V,
- SmallVectorImpl<Decl *> &AllIvarDecls) :
- P(P), IDecl(IDecl), visibility(V), AllIvarDecls(AllIvarDecls) {
- }
- void invoke(ParsingFieldDeclarator &FD) override {
- P.Actions.ActOnObjCContainerStartDefinition(IDecl);
- // Install the declarator into the interface decl.
- Decl *Field
- = P.Actions.ActOnIvar(P.getCurScope(),
- FD.D.getDeclSpec().getSourceRange().getBegin(),
- FD.D, FD.BitfieldSize, visibility);
- P.Actions.ActOnObjCContainerFinishDefinition();
- if (Field)
- AllIvarDecls.push_back(Field);
- FD.complete(Field);
- }
- } Callback(*this, interfaceDecl, visibility, AllIvarDecls);
-
+ auto ObjCIvarCallback = [&](ParsingFieldDeclarator &FD) {
+ Actions.ActOnObjCContainerStartDefinition(interfaceDecl);
+ // Install the declarator into the interface decl.
+ Decl *Field = Actions.ActOnIvar(
+ getCurScope(), FD.D.getDeclSpec().getSourceRange().getBegin(), FD.D,
+ FD.BitfieldSize, visibility);
+ Actions.ActOnObjCContainerFinishDefinition();
+ if (Field)
+ AllIvarDecls.push_back(Field);
+ FD.complete(Field);
+ };
+
// Parse all the comma separated declarators.
ParsingDeclSpec DS(*this);
- ParseStructDeclaration(DS, Callback);
+ ParseStructDeclaration(DS, ObjCIvarCallback);
if (Tok.is(tok::semi)) {
ConsumeToken();
@@ -2071,7 +2024,13 @@ StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
if (Tok.isObjCAtKeyword(tok::objc_autoreleasepool))
return ParseObjCAutoreleasePoolStmt(AtLoc);
-
+
+ if (Tok.isObjCAtKeyword(tok::objc_import) &&
+ getLangOpts().DebuggerSupport) {
+ SkipUntil(tok::semi);
+ return Actions.ActOnNullStmt(Tok.getLocation());
+ }
+
ExprResult Res(ParseExpressionWithLeadingAt(AtLoc));
if (Res.isInvalid()) {
// If the expression is invalid, skip ahead to the next semicolon. Not
@@ -2217,7 +2176,10 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
if (!Actions.isSimpleTypeSpecifier(Tok.getKind())) {
// objc-receiver:
// expression
- ExprResult Receiver = ParseExpression();
+ // Make sure any typos in the receiver are corrected or diagnosed, so that
+ // proper recovery can happen. FIXME: Perhaps filter the corrected expr to
+ // only the things that are valid ObjC receivers?
+ ExprResult Receiver = Actions.CorrectDelayedTyposInExpr(ParseExpression());
if (Receiver.isInvalid())
return true;
@@ -2394,7 +2356,7 @@ ExprResult Parser::ParseObjCMessageExpression() {
}
// Otherwise, an arbitrary expression can be the receiver of a send.
- ExprResult Res(ParseExpression());
+ ExprResult Res = Actions.CorrectDelayedTyposInExpr(ParseExpression());
if (Res.isInvalid()) {
SkipUntil(tok::r_square, StopAtSemi);
return Res;
@@ -2446,7 +2408,7 @@ ExprResult
Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
- ExprArg ReceiverExpr) {
+ Expr *ReceiverExpr) {
InMessageExpressionRAIIObject InMessage(*this, true);
if (Tok.is(tok::code_completion)) {
@@ -2553,6 +2515,8 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
SourceLocation commaLoc = ConsumeToken(); // Eat the ','.
/// Parse the expression after ','
ExprResult Res(ParseAssignmentExpression());
+ if (Tok.is(tok::colon))
+ Res = Actions.CorrectDelayedTyposInExpr(Res);
if (Res.isInvalid()) {
if (Tok.is(tok::colon)) {
Diag(commaLoc, diag::note_extra_comma_message_arg) <<
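The ParseObjc.cpp hunks replace the out-of-line ObjCPropertyCallback/ObjCIvarCallback structs (originally hoisted only to placate VS2005) with lambdas that capture the surrounding parser state by reference and are handed straight to ParseStructDeclaration. A minimal sketch of that pattern, using std::function in place of the parser's own callable type and with invented names throughout:

  #include <functional>
  #include <vector>

  struct FieldDeclarator { const char *Name; };

  // Stand-in for ParseStructDeclaration: calls the callback once per declarator.
  void parseStructDeclaration(const std::function<void(FieldDeclarator &)> &FieldsCallback) {
    FieldDeclarator FD{"ivar1"};
    FieldsCallback(FD);
  }

  void collectFields(std::vector<const char *> &AllFields) {
    auto Callback = [&](FieldDeclarator &FD) {   // captures AllFields by reference
      AllFields.push_back(FD.Name);
    };
    parseStructDeclaration(Callback);
  }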
diff --git a/lib/Parse/ParseOpenMP.cpp b/lib/Parse/ParseOpenMP.cpp
index d1544e6baa5b..764619aae6f3 100644
--- a/lib/Parse/ParseOpenMP.cpp
+++ b/lib/Parse/ParseOpenMP.cpp
@@ -26,23 +26,31 @@ using namespace clang;
//===----------------------------------------------------------------------===//
static OpenMPDirectiveKind ParseOpenMPDirectiveKind(Parser &P) {
+ // Array of foldings: F[i][0] F[i][1] ===> F[i][2].
+ // E.g.: OMPD_for OMPD_simd ===> OMPD_for_simd
+ // TODO: add other combined directives in topological order.
+ const OpenMPDirectiveKind F[][3] = {
+ { OMPD_for, OMPD_simd, OMPD_for_simd },
+ { OMPD_parallel, OMPD_for, OMPD_parallel_for },
+ { OMPD_parallel_for, OMPD_simd, OMPD_parallel_for_simd },
+ { OMPD_parallel, OMPD_sections, OMPD_parallel_sections }
+ };
auto Tok = P.getCurToken();
auto DKind =
Tok.isAnnotation()
? OMPD_unknown
: getOpenMPDirectiveKind(P.getPreprocessor().getSpelling(Tok));
- if (DKind == OMPD_parallel) {
- Tok = P.getPreprocessor().LookAhead(0);
- auto SDKind =
- Tok.isAnnotation()
- ? OMPD_unknown
- : getOpenMPDirectiveKind(P.getPreprocessor().getSpelling(Tok));
- if (SDKind == OMPD_for) {
- P.ConsumeToken();
- DKind = OMPD_parallel_for;
- } else if (SDKind == OMPD_sections) {
- P.ConsumeToken();
- DKind = OMPD_parallel_sections;
+ for (unsigned i = 0; i < llvm::array_lengthof(F); ++i) {
+ if (DKind == F[i][0]) {
+ Tok = P.getPreprocessor().LookAhead(0);
+ auto SDKind =
+ Tok.isAnnotation()
+ ? OMPD_unknown
+ : getOpenMPDirectiveKind(P.getPreprocessor().getSpelling(Tok));
+ if (SDKind == F[i][1]) {
+ P.ConsumeToken();
+ DKind = F[i][2];
+ }
}
}
return DKind;
@@ -88,13 +96,19 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirective() {
case OMPD_taskwait:
case OMPD_flush:
case OMPD_for:
+ case OMPD_for_simd:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
+ case OMPD_ordered:
case OMPD_critical:
case OMPD_parallel_for:
+ case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
+ case OMPD_atomic:
+ case OMPD_target:
+ case OMPD_teams:
Diag(Tok, diag::err_omp_unexpected_directive)
<< getOpenMPDirectiveName(DKind);
break;
@@ -113,7 +127,9 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirective() {
/// annot_pragma_openmp 'parallel' | 'simd' | 'for' | 'sections' |
/// 'section' | 'single' | 'master' | 'critical' [ '(' <name> ')' ] |
/// 'parallel for' | 'parallel sections' | 'task' | 'taskyield' |
-/// 'barrier' | 'taskwait' | 'flush' {clause} annot_pragma_openmp_end
+/// 'barrier' | 'taskwait' | 'flush' | 'ordered' | 'atomic' |
+/// 'for simd' | 'parallel for simd' | 'target' | 'teams' {clause}
+/// annot_pragma_openmp_end
///
StmtResult
Parser::ParseOpenMPDeclarativeOrExecutableDirective(bool StandAloneAllowed) {
@@ -169,14 +185,20 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(bool StandAloneAllowed) {
case OMPD_parallel:
case OMPD_simd:
case OMPD_for:
+ case OMPD_for_simd:
case OMPD_sections:
case OMPD_single:
case OMPD_section:
case OMPD_master:
case OMPD_critical:
case OMPD_parallel_for:
+ case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
- case OMPD_task: {
+ case OMPD_task:
+ case OMPD_ordered:
+ case OMPD_atomic:
+ case OMPD_target:
+ case OMPD_teams: {
ConsumeToken();
// Parse directive name of the 'critical' directive if any.
if (DKind == OMPD_critical) {
@@ -337,7 +359,8 @@ bool Parser::ParseOpenMPSimpleVarList(OpenMPDirectiveKind Kind,
/// | linear-clause | aligned-clause | collapse-clause |
/// lastprivate-clause | reduction-clause | proc_bind-clause |
/// schedule-clause | copyin-clause | copyprivate-clause | untied-clause |
-/// mergeable-clause | flush-clause
+/// mergeable-clause | flush-clause | read-clause | write-clause |
+/// update-clause | capture-clause | seq_cst-clause
///
OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause) {
@@ -368,6 +391,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause) << getOpenMPDirectiveName(DKind)
<< getOpenMPClauseName(CKind);
+ ErrorFound = true;
}
Clause = ParseOpenMPSingleExprClause(CKind);
@@ -382,6 +406,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause) << getOpenMPDirectiveName(DKind)
<< getOpenMPClauseName(CKind);
+ ErrorFound = true;
}
Clause = ParseOpenMPSimpleClause(CKind);
@@ -392,6 +417,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause) << getOpenMPDirectiveName(DKind)
<< getOpenMPClauseName(CKind);
+ ErrorFound = true;
}
Clause = ParseOpenMPSingleExprWithArgClause(CKind);
@@ -400,6 +426,11 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_nowait:
case OMPC_untied:
case OMPC_mergeable:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
// OpenMP [2.7.1, Restrictions, p. 9]
// Only one ordered clause can appear on a loop directive.
// OpenMP [2.7.1, Restrictions, C/C++, p. 4]
@@ -407,6 +438,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause) << getOpenMPDirectiveName(DKind)
<< getOpenMPClauseName(CKind);
+ ErrorFound = true;
}
Clause = ParseOpenMPClause(CKind);
@@ -522,6 +554,9 @@ OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind) {
/// mergeable-clause:
/// 'mergeable'
///
+/// read-clause:
+/// 'read'
+///
OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind) {
SourceLocation Loc = Tok.getLocation();
ConsumeAnyToken();
@@ -680,7 +715,8 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPClauseKind Kind) {
Tok.isNot(tok::annot_pragma_openmp_end))) {
ColonProtectionRAIIObject ColonRAII(*this, MayHaveTail);
// Parse variable
- ExprResult VarExpr = ParseAssignmentExpression();
+ ExprResult VarExpr =
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
if (VarExpr.isUsable()) {
Vars.push_back(VarExpr.get());
} else {
@@ -706,7 +742,8 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPClauseKind Kind) {
if (MustHaveTail) {
ColonLoc = Tok.getLocation();
ConsumeToken();
- ExprResult Tail = ParseAssignmentExpression();
+ ExprResult Tail =
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
if (Tail.isUsable())
TailExpr = Tail.get();
else
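The new folding table in ParseOpenMPDirectiveKind merges adjacent directive keywords into the combined OpenMP 4.0 directives (for + simd, parallel + for + simd, parallel + sections), and the added case labels let the parser accept ordered, atomic, target and teams. A hypothetical loop using one of the newly recognized combined directives, assuming a build with -fopenmp:

  void saxpy(int n, float a, const float *x, float *y) {
  #pragma omp parallel for simd
    for (int i = 0; i < n; ++i)
      y[i] = a * x[i] + y[i];
  }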
diff --git a/lib/Parse/ParsePragma.cpp b/lib/Parse/ParsePragma.cpp
index d3777f3ea4a0..473be5467eeb 100644
--- a/lib/Parse/ParsePragma.cpp
+++ b/lib/Parse/ParsePragma.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTContext.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Parse/ParseDiagnostic.h"
@@ -228,6 +229,9 @@ void Parser::initializePragmaHandlers() {
UnrollHintHandler.reset(new PragmaUnrollHintHandler("unroll"));
PP.AddPragmaHandler(UnrollHintHandler.get());
+
+ NoUnrollHintHandler.reset(new PragmaUnrollHintHandler("nounroll"));
+ PP.AddPragmaHandler(NoUnrollHintHandler.get());
}
void Parser::resetPragmaHandlers() {
@@ -291,6 +295,9 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler(UnrollHintHandler.get());
UnrollHintHandler.reset();
+
+ PP.RemovePragmaHandler(NoUnrollHintHandler.get());
+ NoUnrollHintHandler.reset();
}
/// \brief Handle the annotation token produced for #pragma unused(...)
@@ -525,36 +532,50 @@ bool Parser::HandlePragmaMSSection(StringRef PragmaName,
<< PragmaName;
return false;
}
- int SectionFlags = 0;
+ int SectionFlags = ASTContext::PSF_Read;
+ bool SectionFlagsAreDefault = true;
while (Tok.is(tok::comma)) {
PP.Lex(Tok); // ,
+ // Ignore "long" and "short".
+ // They are undocumented, but widely used, section attributes which appear
+ // to do nothing.
+ if (Tok.is(tok::kw_long) || Tok.is(tok::kw_short)) {
+ PP.Lex(Tok); // long/short
+ continue;
+ }
+
if (!Tok.isAnyIdentifier()) {
PP.Diag(PragmaLocation, diag::warn_pragma_expected_action_or_r_paren)
<< PragmaName;
return false;
}
- Sema::PragmaSectionFlag Flag =
- llvm::StringSwitch<Sema::PragmaSectionFlag>(
+ ASTContext::PragmaSectionFlag Flag =
+ llvm::StringSwitch<ASTContext::PragmaSectionFlag>(
Tok.getIdentifierInfo()->getName())
- .Case("read", Sema::PSF_Read)
- .Case("write", Sema::PSF_Write)
- .Case("execute", Sema::PSF_Execute)
- .Case("shared", Sema::PSF_Invalid)
- .Case("nopage", Sema::PSF_Invalid)
- .Case("nocache", Sema::PSF_Invalid)
- .Case("discard", Sema::PSF_Invalid)
- .Case("remove", Sema::PSF_Invalid)
- .Default(Sema::PSF_None);
- if (Flag == Sema::PSF_None || Flag == Sema::PSF_Invalid) {
- PP.Diag(PragmaLocation, Flag == Sema::PSF_None
+ .Case("read", ASTContext::PSF_Read)
+ .Case("write", ASTContext::PSF_Write)
+ .Case("execute", ASTContext::PSF_Execute)
+ .Case("shared", ASTContext::PSF_Invalid)
+ .Case("nopage", ASTContext::PSF_Invalid)
+ .Case("nocache", ASTContext::PSF_Invalid)
+ .Case("discard", ASTContext::PSF_Invalid)
+ .Case("remove", ASTContext::PSF_Invalid)
+ .Default(ASTContext::PSF_None);
+ if (Flag == ASTContext::PSF_None || Flag == ASTContext::PSF_Invalid) {
+ PP.Diag(PragmaLocation, Flag == ASTContext::PSF_None
? diag::warn_pragma_invalid_specific_action
: diag::warn_pragma_unsupported_action)
<< PragmaName << Tok.getIdentifierInfo()->getName();
return false;
}
SectionFlags |= Flag;
+ SectionFlagsAreDefault = false;
PP.Lex(Tok); // Identifier
}
+ // If no section attributes are specified, the section will be marked as
+ // read/write.
+ if (SectionFlagsAreDefault)
+ SectionFlags |= ASTContext::PSF_Write;
if (Tok.isNot(tok::r_paren)) {
PP.Diag(PragmaLocation, diag::warn_pragma_expected_rparen) << PragmaName;
return false;
@@ -718,42 +739,124 @@ bool Parser::HandlePragmaMSInitSeg(StringRef PragmaName,
struct PragmaLoopHintInfo {
Token PragmaName;
Token Option;
- Token Value;
- bool HasValue;
+ Token *Toks;
+ size_t TokSize;
+ PragmaLoopHintInfo() : Toks(nullptr), TokSize(0) {}
};
-LoopHint Parser::HandlePragmaLoopHint() {
+static std::string PragmaLoopHintString(Token PragmaName, Token Option) {
+ std::string PragmaString;
+ if (PragmaName.getIdentifierInfo()->getName() == "loop") {
+ PragmaString = "clang loop ";
+ PragmaString += Option.getIdentifierInfo()->getName();
+ } else {
+ assert(PragmaName.getIdentifierInfo()->getName() == "unroll" &&
+ "Unexpected pragma name");
+ PragmaString = "unroll";
+ }
+ return PragmaString;
+}
+
+bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
assert(Tok.is(tok::annot_pragma_loop_hint));
PragmaLoopHintInfo *Info =
static_cast<PragmaLoopHintInfo *>(Tok.getAnnotationValue());
- LoopHint Hint;
- Hint.PragmaNameLoc =
- IdentifierLoc::create(Actions.Context, Info->PragmaName.getLocation(),
- Info->PragmaName.getIdentifierInfo());
- Hint.OptionLoc =
- IdentifierLoc::create(Actions.Context, Info->Option.getLocation(),
- Info->Option.getIdentifierInfo());
- if (Info->HasValue) {
- Hint.Range =
- SourceRange(Info->Option.getLocation(), Info->Value.getLocation());
- Hint.ValueLoc =
- IdentifierLoc::create(Actions.Context, Info->Value.getLocation(),
- Info->Value.getIdentifierInfo());
-
- // FIXME: We should allow non-type template parameters for the loop hint
- // value. See bug report #19610
- if (Info->Value.is(tok::numeric_constant))
- Hint.ValueExpr = Actions.ActOnNumericConstant(Info->Value).get();
- else
- Hint.ValueExpr = nullptr;
+ IdentifierInfo *PragmaNameInfo = Info->PragmaName.getIdentifierInfo();
+ Hint.PragmaNameLoc = IdentifierLoc::create(
+ Actions.Context, Info->PragmaName.getLocation(), PragmaNameInfo);
+
+ // It is possible that the loop hint has no option identifier, such as
+ // #pragma unroll(4).
+ IdentifierInfo *OptionInfo = Info->Option.is(tok::identifier)
+ ? Info->Option.getIdentifierInfo()
+ : nullptr;
+ Hint.OptionLoc = IdentifierLoc::create(
+ Actions.Context, Info->Option.getLocation(), OptionInfo);
+
+ Token *Toks = Info->Toks;
+ size_t TokSize = Info->TokSize;
+
+ // Return a valid hint if pragma unroll or nounroll were specified
+ // without an argument.
+ bool PragmaUnroll = PragmaNameInfo->getName() == "unroll";
+ bool PragmaNoUnroll = PragmaNameInfo->getName() == "nounroll";
+ if (TokSize == 0 && (PragmaUnroll || PragmaNoUnroll)) {
+ ConsumeToken(); // The annotation token.
+ Hint.Range = Info->PragmaName.getLocation();
+ return true;
+ }
+
+ // The constant expression is always followed by an eof token, which increases
+ // the TokSize by 1.
+ assert(TokSize > 0 &&
+ "PragmaLoopHintInfo::Toks must contain at least one token.");
+
+ // If no option is specified the argument is assumed to be a constant expr.
+ bool StateOption = false;
+ if (OptionInfo) { // Pragma unroll does not specify an option.
+ StateOption = llvm::StringSwitch<bool>(OptionInfo->getName())
+ .Case("vectorize", true)
+ .Case("interleave", true)
+ .Case("unroll", true)
+ .Default(false);
+ }
+
+ // Verify loop hint has an argument.
+ if (Toks[0].is(tok::eof)) {
+ ConsumeToken(); // The annotation token.
+ Diag(Toks[0].getLocation(), diag::err_pragma_loop_missing_argument)
+ << /*StateArgument=*/StateOption << /*FullKeyword=*/PragmaUnroll;
+ return false;
+ }
+
+ // Validate the argument.
+ if (StateOption) {
+ ConsumeToken(); // The annotation token.
+ bool OptionUnroll = OptionInfo->isStr("unroll");
+ SourceLocation StateLoc = Toks[0].getLocation();
+ IdentifierInfo *StateInfo = Toks[0].getIdentifierInfo();
+ if (!StateInfo || ((OptionUnroll ? !StateInfo->isStr("full")
+ : !StateInfo->isStr("enable")) &&
+ !StateInfo->isStr("disable"))) {
+ Diag(Toks[0].getLocation(), diag::err_pragma_invalid_keyword)
+ << /*FullKeyword=*/OptionUnroll;
+ return false;
+ }
+ if (TokSize > 2)
+ Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << PragmaLoopHintString(Info->PragmaName, Info->Option);
+ Hint.StateLoc = IdentifierLoc::create(Actions.Context, StateLoc, StateInfo);
} else {
- Hint.Range = SourceRange(Info->PragmaName.getLocation());
- Hint.ValueLoc = nullptr;
- Hint.ValueExpr = nullptr;
+ // Enter constant expression including eof terminator into token stream.
+ PP.EnterTokenStream(Toks, TokSize, /*DisableMacroExpansion=*/false,
+ /*OwnsTokens=*/false);
+ ConsumeToken(); // The annotation token.
+
+ ExprResult R = ParseConstantExpression();
+
+ // Tokens following an error in an ill-formed constant expression will
+ // remain in the token stream and must be removed.
+ if (Tok.isNot(tok::eof)) {
+ Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << PragmaLoopHintString(Info->PragmaName, Info->Option);
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ }
+
+ ConsumeToken(); // Consume the constant expression eof terminator.
+
+ if (R.isInvalid() ||
+ Actions.CheckLoopHintExpr(R.get(), Toks[0].getLocation()))
+ return false;
+
+ // Argument is a constant expression with an integer type.
+ Hint.ValueExpr = R.get();
}
- return Hint;
+ Hint.Range = SourceRange(Info->PragmaName.getLocation(),
+ Info->Toks[TokSize - 1].getLocation());
+ return true;
}
// #pragma GCC visibility comes in two variants:
@@ -1725,8 +1828,7 @@ void PragmaOptimizeHandler::HandlePragma(Preprocessor &PP,
PP.Lex(Tok);
if (Tok.is(tok::eod)) {
PP.Diag(Tok.getLocation(), diag::err_pragma_missing_argument)
- << "clang optimize"
- << "'on' or 'off'";
+ << "clang optimize" << /*Expected=*/true << "'on' or 'off'";
return;
}
if (Tok.isNot(tok::identifier)) {
@@ -1756,44 +1858,48 @@ void PragmaOptimizeHandler::HandlePragma(Preprocessor &PP,
}
/// \brief Parses loop or unroll pragma hint value and fills in Info.
-static bool ParseLoopHintValue(Preprocessor &PP, Token Tok, Token &PragmaName,
- Token &Option, bool &ValueInParens,
+static bool ParseLoopHintValue(Preprocessor &PP, Token &Tok, Token PragmaName,
+ Token Option, bool ValueInParens,
PragmaLoopHintInfo &Info) {
- ValueInParens = Tok.is(tok::l_paren);
- if (ValueInParens) {
- PP.Lex(Tok);
- if (Tok.is(tok::r_paren)) {
- // Nothing between the parentheses.
- std::string PragmaString;
- if (PragmaName.getIdentifierInfo()->getName() == "loop") {
- PragmaString = "clang loop ";
- PragmaString += Option.getIdentifierInfo()->getName();
- } else {
- assert(PragmaName.getIdentifierInfo()->getName() == "unroll" &&
- "Unexpected pragma name");
- PragmaString = "unroll";
- }
- PP.Diag(Tok.getLocation(), diag::err_pragma_missing_argument)
- << PragmaString << "a positive integer value";
- return true;
+ SmallVector<Token, 1> ValueList;
+ int OpenParens = ValueInParens ? 1 : 0;
+ // Read constant expression.
+ while (Tok.isNot(tok::eod)) {
+ if (Tok.is(tok::l_paren))
+ OpenParens++;
+ else if (Tok.is(tok::r_paren)) {
+ OpenParens--;
+ if (OpenParens == 0 && ValueInParens)
+ break;
}
- }
- // FIXME: Value should be stored and parsed as a constant expression.
- Token Value = Tok;
+ ValueList.push_back(Tok);
+ PP.Lex(Tok);
+ }
if (ValueInParens) {
- PP.Lex(Tok);
+ // Read ')'
if (Tok.isNot(tok::r_paren)) {
PP.Diag(Tok.getLocation(), diag::err_expected) << tok::r_paren;
return true;
}
+ PP.Lex(Tok);
}
+ Token EOFTok;
+ EOFTok.startToken();
+ EOFTok.setKind(tok::eof);
+ EOFTok.setLocation(Tok.getLocation());
+ ValueList.push_back(EOFTok); // Terminates expression for parsing.
+
+ Token *TokenArray = (Token *)PP.getPreprocessorAllocator().Allocate(
+ ValueList.size() * sizeof(Token), llvm::alignOf<Token>());
+ std::copy(ValueList.begin(), ValueList.end(), TokenArray);
+ Info.Toks = TokenArray;
+ Info.TokSize = ValueList.size();
+
Info.PragmaName = PragmaName;
Info.Option = Option;
- Info.Value = Value;
- Info.HasValue = true;
return false;
}
@@ -1806,7 +1912,7 @@ static bool ParseLoopHintValue(Preprocessor &PP, Token Tok, Token &PragmaName,
/// loop-hint:
/// 'vectorize' '(' loop-hint-keyword ')'
/// 'interleave' '(' loop-hint-keyword ')'
-/// 'unroll' '(' loop-hint-keyword ')'
+/// 'unroll' '(' unroll-hint-keyword ')'
/// 'vectorize_width' '(' loop-hint-value ')'
/// 'interleave_count' '(' loop-hint-value ')'
/// 'unroll_count' '(' loop-hint-value ')'
@@ -1815,6 +1921,10 @@ static bool ParseLoopHintValue(Preprocessor &PP, Token Tok, Token &PragmaName,
/// 'enable'
/// 'disable'
///
+/// unroll-hint-keyword:
+/// 'full'
+/// 'disable'
+///
/// loop-hint-value:
/// constant-expression
///
@@ -1829,12 +1939,10 @@ static bool ParseLoopHintValue(Preprocessor &PP, Token Tok, Token &PragmaName,
/// only works on inner loops.
///
/// The unroll and unroll_count directives control the concatenation
-/// unroller. Specifying unroll(enable) instructs llvm to try to
+/// unroller. Specifying unroll(full) instructs llvm to try to
/// unroll the loop completely, and unroll(disable) disables unrolling
/// for the loop. Specifying unroll_count(_value_) instructs llvm to
/// try to unroll the loop the number of times indicated by the value.
-/// If unroll(enable) and unroll_count are both specified only
-/// unroll_count takes effect.
void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
PragmaIntroducerKind Introducer,
Token &Tok) {
@@ -1867,17 +1975,19 @@ void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
<< /*MissingOption=*/false << OptionInfo;
return;
}
-
- auto *Info = new (PP.getPreprocessorAllocator()) PragmaLoopHintInfo;
PP.Lex(Tok);
- bool ValueInParens;
- if (ParseLoopHintValue(PP, Tok, PragmaName, Option, ValueInParens, *Info))
- return;
- if (!ValueInParens) {
- PP.Diag(Info->Value.getLocation(), diag::err_expected) << tok::l_paren;
+ // Read '('
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_expected) << tok::l_paren;
return;
}
+ PP.Lex(Tok);
+
+ auto *Info = new (PP.getPreprocessorAllocator()) PragmaLoopHintInfo;
+ if (ParseLoopHintValue(PP, Tok, PragmaName, Option, /*ValueInParens=*/true,
+ *Info))
+ return;
// Generate the loop hint token.
Token LoopHintTok;
@@ -1886,9 +1996,6 @@ void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
LoopHintTok.setLocation(PragmaName.getLocation());
LoopHintTok.setAnnotationValue(static_cast<void *>(Info));
TokenList.push_back(LoopHintTok);
-
- // Get next optimization option.
- PP.Lex(Tok);
}
if (Tok.isNot(tok::eod)) {
@@ -1909,44 +2016,54 @@ void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
/// #pragma unroll
/// #pragma unroll unroll-hint-value
/// #pragma unroll '(' unroll-hint-value ')'
+/// #pragma nounroll
///
/// unroll-hint-value:
/// constant-expression
///
-/// Loop unrolling hints are specified with '#pragma unroll'. '#pragma unroll'
-/// can take a numeric argument optionally contained in parentheses. With no
-/// argument the directive instructs llvm to try to unroll the loop
-/// completely. A positive integer argument can be specified to indicate the
-/// number of times the loop should be unrolled. To maximize compatibility with
-/// other compilers the unroll count argument can be specified with or without
-/// parentheses.
+/// Loop unrolling hints can be specified with '#pragma unroll' or
+/// '#pragma nounroll'. '#pragma unroll' can take a numeric argument optionally
+/// contained in parentheses. With no argument the directive instructs llvm to
+/// try to unroll the loop completely. A positive integer argument can be
+/// specified to indicate the number of times the loop should be unrolled. To
+/// maximize compatibility with other compilers the unroll count argument can be
+/// specified with or without parentheses. Specifying, '#pragma nounroll'
+/// disables unrolling of the loop.
void PragmaUnrollHintHandler::HandlePragma(Preprocessor &PP,
PragmaIntroducerKind Introducer,
Token &Tok) {
- // Incoming token is "unroll" of "#pragma unroll".
+ // Incoming token is "unroll" for "#pragma unroll", or "nounroll" for
+ // "#pragma nounroll".
Token PragmaName = Tok;
PP.Lex(Tok);
auto *Info = new (PP.getPreprocessorAllocator()) PragmaLoopHintInfo;
if (Tok.is(tok::eod)) {
- // Unroll pragma without an argument.
+ // nounroll or unroll pragma without an argument.
Info->PragmaName = PragmaName;
- Info->Option = PragmaName;
- Info->HasValue = false;
+ Info->Option.startToken();
+ } else if (PragmaName.getIdentifierInfo()->getName() == "nounroll") {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "nounroll";
+ return;
} else {
// Unroll pragma with an argument: "#pragma unroll N" or
// "#pragma unroll(N)".
- bool ValueInParens;
- if (ParseLoopHintValue(PP, Tok, PragmaName, PragmaName, ValueInParens,
- *Info))
+ // Read '(' if it exists.
+ bool ValueInParens = Tok.is(tok::l_paren);
+ if (ValueInParens)
+ PP.Lex(Tok);
+
+ Token Option;
+ Option.startToken();
+ if (ParseLoopHintValue(PP, Tok, PragmaName, Option, ValueInParens, *Info))
return;
// In CUDA, the argument to '#pragma unroll' should not be contained in
// parentheses.
if (PP.getLangOpts().CUDA && ValueInParens)
- PP.Diag(Info->Value.getLocation(),
+ PP.Diag(Info->Toks[0].getLocation(),
diag::warn_pragma_unroll_cuda_value_in_parens);
- PP.Lex(Tok);
if (Tok.isNot(tok::eod)) {
PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
<< "unroll";
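The ParsePragma.cpp changes store the whole hint argument as a token stream so it can be parsed as a constant expression, switch the unroll state keyword from enable to full, and add the nounroll pragma (plus its handler registration above). Hypothetical loops showing the spellings these handlers now accept:

  void scale(float *v, int n, float a) {
  #pragma clang loop vectorize(enable) unroll(full)
    for (int i = 0; i < n; ++i)
      v[i] *= a;

  #pragma unroll(2 + 2)   // the count may now be a constant expression, not just a literal
    for (int i = 0; i < 8; ++i)
      v[i] += a;

  #pragma nounroll        // new: disables unrolling for the following loop
    for (int i = 0; i < n; ++i)
      v[i] -= a;
  }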
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
index ec0ca6b1fdd6..2a5f8406a50c 100644
--- a/lib/Parse/ParseStmt.cpp
+++ b/lib/Parse/ParseStmt.cpp
@@ -185,9 +185,9 @@ Retry:
if (Next.isNot(tok::coloncolon)) {
// Try to limit which sets of keywords should be included in typo
// correction based on what the next token is.
- StatementFilterCCC Validator(Next);
- if (TryAnnotateName(/*IsAddressOfOperand*/false, &Validator)
- == ANK_Error) {
+ if (TryAnnotateName(/*IsAddressOfOperand*/ false,
+ llvm::make_unique<StatementFilterCCC>(Next)) ==
+ ANK_Error) {
// Handle errors here by skipping up to the next semicolon or '}', and
// eat the semicolon if that's what stopped us.
SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
@@ -207,7 +207,7 @@ Retry:
default: {
if ((getLangOpts().CPlusPlus || !OnlyStatement) && isDeclarationStatement()) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy Decl = ParseDeclaration(Stmts, Declarator::BlockContext,
+ DeclGroupPtrTy Decl = ParseDeclaration(Declarator::BlockContext,
DeclEnd, Attrs);
return Actions.ActOnDeclStmt(Decl, DeclStart, DeclEnd);
}
@@ -424,27 +424,11 @@ StmtResult Parser::ParseSEHTryBlock() {
/// seh-finally-block
///
StmtResult Parser::ParseSEHTryBlockCommon(SourceLocation TryLoc) {
- if (Tok.isNot(tok::l_brace))
+ if(Tok.isNot(tok::l_brace))
return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace);
- int SEHTryIndex, SEHTryParentIndex;
- StmtResult TryBlock;
- {
- assert(Tok.is(tok::l_brace) && "Not a compount stmt!");
-
- // Enter a scope to hold everything within the compound stmt. Compound
- // statements can always hold declarations.
- ParseScope CompoundScope(this, Scope::DeclScope | Scope::SEHTryScope);
- SEHTryIndex = getCurScope()->getSEHTryIndex();
- SEHTryParentIndex = getCurScope()->getSEHTryParentIndex();
-
- // Parse the statements in the body.
- TryBlock = ParseCompoundStatementBody();
- }
-
- //StmtResult TryBlock(ParseCompoundStatement(/*isStmtExpr=*/false,
- // Scope::DeclScope | Scope::SEHTryScope));
-
+ StmtResult TryBlock(ParseCompoundStatement(/*isStmtExpr=*/false,
+ Scope::DeclScope | Scope::SEHTryScope));
if(TryBlock.isInvalid())
return TryBlock;
@@ -466,9 +450,7 @@ StmtResult Parser::ParseSEHTryBlockCommon(SourceLocation TryLoc) {
return Actions.ActOnSEHTryBlock(false /* IsCXXTry */,
TryLoc,
TryBlock.get(),
- Handler.get(),
- SEHTryIndex,
- SEHTryParentIndex);
+ Handler.get());
}
/// ParseSEHExceptBlock - Handle __except
@@ -660,6 +642,11 @@ StmtResult Parser::ParseCaseStatement(bool MissingCase, ExprResult Expr) {
ExprResult LHS;
if (!MissingCase) {
LHS = ParseConstantExpression();
+ if (!getLangOpts().CPlusPlus11) {
+ LHS = Actions.CorrectDelayedTyposInExpr(LHS, [this](class Expr *E) {
+ return Actions.VerifyIntegerConstantExpression(E);
+ });
+ }
if (LHS.isInvalid()) {
// If constant-expression is parsed unsuccessfully, recover by skipping
// current case statement (moving to the colon that ends it).
@@ -974,8 +961,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
ExtensionRAIIObject O(Diags);
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy Res = ParseDeclaration(Stmts,
- Declarator::BlockContext, DeclEnd,
+ DeclGroupPtrTy Res = ParseDeclaration(Declarator::BlockContext, DeclEnd,
attrs);
R = Actions.ActOnDeclStmt(Res, DeclStart, DeclEnd);
} else {
@@ -1534,9 +1520,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
else
ForRangeInit.RangeExpr = ParseExpression();
- Diag(Loc, getLangOpts().CPlusPlus1z
- ? diag::warn_cxx1y_compat_for_range_identifier
- : diag::ext_for_range_identifier)
+ Diag(Loc, diag::err_for_range_identifier)
<< ((getLangOpts().CPlusPlus11 && !getLangOpts().CPlusPlus1z)
? FixItHint::CreateInsertion(Loc, "auto &&")
: FixItHint());
@@ -1554,9 +1538,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- StmtVector Stmts;
DeclGroupPtrTy DG = ParseSimpleDeclaration(
- Stmts, Declarator::ForContext, DeclEnd, attrs, false,
+ Declarator::ForContext, DeclEnd, attrs, false,
MightBeForRangeStmt ? &ForRangeInit : nullptr);
FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
if (ForRangeInit.ParsedForRangeDecl()) {
@@ -1582,7 +1565,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
}
} else {
ProhibitAttributes(attrs);
- Value = ParseExpression();
+ Value = Actions.CorrectDelayedTyposInExpr(ParseExpression());
ForEach = isTokIdentifier_in();
@@ -1818,7 +1801,7 @@ StmtResult Parser::ParseReturnStatement() {
diag::ext_generalized_initializer_lists)
<< R.get()->getSourceRange();
} else
- R = ParseExpression();
+ R = ParseExpression();
if (R.isInvalid()) {
SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
return StmtError();
@@ -1835,10 +1818,11 @@ StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts, bool OnlyStatement,
// Get loop hints and consume annotated token.
while (Tok.is(tok::annot_pragma_loop_hint)) {
- LoopHint Hint = HandlePragmaLoopHint();
- ConsumeToken();
+ LoopHint Hint;
+ if (!HandlePragmaLoopHint(Hint))
+ continue;
- ArgsUnion ArgHints[] = {Hint.PragmaNameLoc, Hint.OptionLoc, Hint.ValueLoc,
+ ArgsUnion ArgHints[] = {Hint.PragmaNameLoc, Hint.OptionLoc, Hint.StateLoc,
ArgsUnion(Hint.ValueExpr)};
TempAttrs.addNew(Hint.PragmaNameLoc->Ident, Hint.Range, nullptr,
Hint.PragmaNameLoc->Loc, ArgHints, 4,
@@ -1975,23 +1959,10 @@ StmtResult Parser::ParseCXXTryBlock() {
StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry) {
if (Tok.isNot(tok::l_brace))
return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace);
- // FIXME: Possible draft standard bug: attribute-specifier should be allowed?
- int SEHTryIndex, SEHTryParentIndex;
- StmtResult TryBlock;
- {
- assert(Tok.is(tok::l_brace) && "Not a compount stmt!");
-
- // Enter a scope to hold everything within the compound stmt. Compound
- // statements can always hold declarations.
- ParseScope CompoundScope(this, Scope::DeclScope | Scope::TryScope |
- (FnTry ? Scope::FnTryCatchScope : 0));
- SEHTryIndex = getCurScope()->getSEHTryIndex();
- SEHTryParentIndex = getCurScope()->getSEHTryParentIndex();
-
- // Parse the statements in the body.
- TryBlock = ParseCompoundStatementBody();
- }
+ StmtResult TryBlock(ParseCompoundStatement(/*isStmtExpr=*/false,
+ Scope::DeclScope | Scope::TryScope |
+ (FnTry ? Scope::FnTryCatchScope : 0)));
if (TryBlock.isInvalid())
return TryBlock;
@@ -2016,9 +1987,7 @@ StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry) {
return Actions.ActOnSEHTryBlock(true /* IsCXXTry */,
TryLoc,
TryBlock.get(),
- Handler.get(),
- SEHTryIndex,
- SEHTryParentIndex);
+ Handler.get());
}
else {
StmtVector Handlers;
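Among the ParseStmt.cpp changes, a range-based for loop with a bare identifier is demoted from an extension warning to err_for_range_identifier (still offering the auto && fix-it), and pre-C++11 case labels run their constant expressions through delayed typo correction. The corrected form of the now-rejected shorthand, as the fix-it suggests:

  #include <vector>
  int sum(const std::vector<int> &v) {
    int s = 0;
    for (auto &&x : v)   // writing 'for (x : v)' is now an error with an 'auto &&' fix-it
      s += x;
    return s;
  }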
diff --git a/lib/Parse/ParseStmtAsm.cpp b/lib/Parse/ParseStmtAsm.cpp
index f7e8307883de..7bf4da6d1bd4 100644
--- a/lib/Parse/ParseStmtAsm.cpp
+++ b/lib/Parse/ParseStmtAsm.cpp
@@ -93,6 +93,15 @@ public:
return Info.OpDecl;
}
+ StringRef LookupInlineAsmLabel(StringRef Identifier, llvm::SourceMgr &LSM,
+ llvm::SMLoc Location,
+ bool Create) override {
+ SourceLocation Loc = translateLocation(LSM, Location);
+ LabelDecl *Label =
+ TheParser.getActions().GetOrCreateMSAsmLabel(Identifier, Loc, Create);
+ return Label->getMSAsmLabel();
+ }
+
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset) override {
return TheParser.getActions().LookupInlineAsmField(Base, Member, Offset,
@@ -133,14 +142,13 @@ private:
}
}
- void handleDiagnostic(const llvm::SMDiagnostic &D) {
+ SourceLocation translateLocation(const llvm::SourceMgr &LSM, llvm::SMLoc SMLoc) {
// Compute an offset into the inline asm buffer.
// FIXME: This isn't right if .macro is involved (but hopefully, no
// real-world code does that).
- const llvm::SourceMgr &LSM = *D.getSourceMgr();
const llvm::MemoryBuffer *LBuf =
- LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
- unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
+ LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(SMLoc));
+ unsigned Offset = SMLoc.getPointer() - LBuf->getBufferStart();
// Figure out which token that offset points into.
const unsigned *TokOffsetPtr =
@@ -157,6 +165,12 @@ private:
Loc = Tok.getLocation();
Loc = Loc.getLocWithOffset(Offset - TokOffset);
}
+ return Loc;
+ }
+
+ void handleDiagnostic(const llvm::SMDiagnostic &D) {
+ const llvm::SourceMgr &LSM = *D.getSourceMgr();
+ SourceLocation Loc = translateLocation(LSM, D.getLoc());
TheParser.Diag(Loc, diag::err_inline_ms_asm_parsing) << D.getMessage();
}
};
@@ -322,6 +336,7 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
SourceLocation EndLoc = AsmLoc;
SmallVector<Token, 4> AsmToks;
+ bool SingleLineMode = true;
unsigned BraceNesting = 0;
unsigned short savedBraceCount = BraceCount;
bool InAsmComment = false;
@@ -333,6 +348,7 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
if (Tok.is(tok::l_brace)) {
// Braced inline asm: consume the opening brace.
+ SingleLineMode = false;
BraceNesting = 1;
EndLoc = ConsumeBrace();
LBraceLocs.push_back(EndLoc);
@@ -364,30 +380,39 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
} else if (!InAsmComment && Tok.is(tok::semi)) {
// A semicolon in an asm is the start of a comment.
InAsmComment = true;
- if (BraceNesting) {
+ if (!SingleLineMode) {
// Compute which line the comment is on.
std::pair<FileID, unsigned> ExpSemiLoc =
SrcMgr.getDecomposedExpansionLoc(TokLoc);
FID = ExpSemiLoc.first;
LineNo = SrcMgr.getLineNumber(FID, ExpSemiLoc.second);
}
- } else if (!BraceNesting || InAsmComment) {
+ } else if (SingleLineMode || InAsmComment) {
// If end-of-line is significant, check whether this token is on a
// new line.
std::pair<FileID, unsigned> ExpLoc =
SrcMgr.getDecomposedExpansionLoc(TokLoc);
if (ExpLoc.first != FID ||
SrcMgr.getLineNumber(ExpLoc.first, ExpLoc.second) != LineNo) {
- // If this is a single-line __asm, we're done.
- if (!BraceNesting)
+ // If this is a single-line __asm, we're done, except if the next
+ // line begins with an __asm too, in which case we finish a comment
+ // if needed and then keep processing the next line as a single
+ // line __asm.
+ bool isAsm = Tok.is(tok::kw_asm);
+ if (SingleLineMode && !isAsm)
break;
// We're no longer in a comment.
InAsmComment = false;
+ if (isAsm) {
+ LineNo = SrcMgr.getLineNumber(ExpLoc.first, ExpLoc.second);
+ SkippedStartOfLine = Tok.isAtStartOfLine();
+ }
} else if (!InAsmComment && Tok.is(tok::r_brace)) {
- // Single-line asm always ends when a closing brace is seen.
- // FIXME: This is compatible with Apple gcc's -fasm-blocks; what
- // does MSVC do here?
- break;
+ // In MSVC mode, braces only participate in brace matching and
+ // separating the asm statements. This is an intentional
+ // departure from the Apple gcc behavior.
+ if (!BraceNesting)
+ break;
}
}
if (!InAsmComment && BraceNesting && Tok.is(tok::r_brace) &&
@@ -398,7 +423,7 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
BraceNesting--;
// Finish if all of the opened braces in the inline asm section were
// consumed.
- if (BraceNesting == 0)
+ if (BraceNesting == 0 && !SingleLineMode)
break;
else {
LBraceLocs.pop_back();
@@ -487,11 +512,13 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
llvm::SourceMgr TempSrcMgr;
llvm::MCContext Ctx(MAI.get(), MRI.get(), MOFI.get(), &TempSrcMgr);
- llvm::MemoryBuffer *Buffer =
+ MOFI->InitMCObjectFileInfo(TT, llvm::Reloc::Default, llvm::CodeModel::Default,
+ Ctx);
+ std::unique_ptr<llvm::MemoryBuffer> Buffer =
llvm::MemoryBuffer::getMemBuffer(AsmString, "<MS inline asm>");
// Tell SrcMgr about this buffer, which is what the parser will pick up.
- TempSrcMgr.AddNewSourceBuffer(Buffer, llvm::SMLoc());
+ TempSrcMgr.AddNewSourceBuffer(std::move(Buffer), llvm::SMLoc());
std::unique_ptr<llvm::MCStreamer> Str(createNullStreamer(Ctx));
std::unique_ptr<llvm::MCAsmParser> Parser(
@@ -590,7 +617,7 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
}
DeclSpec DS(AttrFactory);
SourceLocation Loc = Tok.getLocation();
- ParseTypeQualifierListOpt(DS, true, false);
+ ParseTypeQualifierListOpt(DS, AR_VendorAttributesParsed);
// GNU asms accept, but warn, about type-qualifiers other than volatile.
if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
@@ -747,7 +774,7 @@ bool Parser::ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
// Read the parenthesized expression.
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- ExprResult Res(ParseExpression());
+ ExprResult Res = Actions.CorrectDelayedTyposInExpr(ParseExpression());
T.consumeClose();
if (Res.isInvalid()) {
SkipUntil(tok::r_paren, StopAtSemi);
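ParseStmtAsm.cpp gains label lookup through GetOrCreateMSAsmLabel, a shared translateLocation helper for diagnostics, and a SingleLineMode flag so that consecutive single-line __asm statements are collected into one block instead of stopping at the first line break or a stray brace. A hypothetical MSVC-mode function, assuming an x86 target with MS-style inline asm enabled:

  void spin() {
    __asm mov eax, 0
    __asm inc eax     // with the change, this line joins the same asm block
  }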
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
index fa6401f083a6..53de72cd3cc5 100644
--- a/lib/Parse/ParseTemplate.cpp
+++ b/lib/Parse/ParseTemplate.cpp
@@ -166,6 +166,14 @@ Parser::ParseSingleDeclarationAfterTemplate(
assert(TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
"Template information required");
+ if (Tok.is(tok::kw_static_assert)) {
+ // A static_assert declaration may not be templated.
+ Diag(Tok.getLocation(), diag::err_templated_invalid_declaration)
+ << TemplateInfo.getSourceRange();
+ // Parse the static_assert declaration to improve error recovery.
+ return ParseStaticAssertDeclaration(DeclEnd);
+ }
+
if (Context == Declarator::MemberContext) {
// We are parsing a member template.
ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo,
@@ -223,6 +231,16 @@ Parser::ParseSingleDeclarationAfterTemplate(
if (DeclaratorInfo.isFunctionDeclarator() &&
isStartOfFunctionDefinition(DeclaratorInfo)) {
+
+ // Function definitions are only allowed at file scope and in C++ classes.
+ // The C++ inline method definition case is handled elsewhere, so we only
+ // need to handle the file scope definition case.
+ if (Context != Declarator::FileContext) {
+ Diag(Tok, diag::err_function_definition_not_allowed);
+ SkipMalformedDecl();
+ return nullptr;
+ }
+
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
// Recover by ignoring the 'typedef'. This was probably supposed to be
// the 'typename' keyword, which we should have already suggested adding
@@ -553,7 +571,7 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
if (Tok.is(tok::kw_typename)) {
Diag(Tok.getLocation(),
getLangOpts().CPlusPlus1z
- ? diag::warn_cxx1y_compat_template_template_param_typename
+ ? diag::warn_cxx14_compat_template_template_param_typename
: diag::ext_template_template_param_typename)
<< (!getLangOpts().CPlusPlus1z
? FixItHint::CreateReplacement(Tok.getLocation(), "class")
@@ -668,7 +686,7 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
- DefaultArg = ParseAssignmentExpression();
+ DefaultArg = Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
if (DefaultArg.isInvalid())
SkipUntil(tok::comma, tok::greater, StopAtSemi | StopBeforeMatch);
}
@@ -1327,7 +1345,8 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope);
// Recreate the containing function DeclContext.
- Sema::ContextRAII FunctionSavedContext(Actions, Actions.getContainingDC(FunD));
+ Sema::ContextRAII FunctionSavedContext(Actions,
+ Actions.getContainingDC(FunD));
Actions.ActOnStartOfFunctionDef(getCurScope(), FunD);
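ParseTemplate.cpp now rejects a templated static_assert up front (while still parsing it for recovery) and refuses function definitions after a template header anywhere other than file scope or a C++ class body. A hypothetical ill-formed declaration that the first diagnostic targets:

  template <typename T>
  static_assert(sizeof(T) > 0, "T must be complete");  // err_templated_invalid_declaration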
diff --git a/lib/Parse/ParseTentative.cpp b/lib/Parse/ParseTentative.cpp
index 8514af2a8c89..abf16fa6222b 100644
--- a/lib/Parse/ParseTentative.cpp
+++ b/lib/Parse/ParseTentative.cpp
@@ -195,7 +195,9 @@ Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
}
}
- if (TryAnnotateCXXScopeToken())
+ if ((Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
+ Tok.is(tok::kw_decltype) || Tok.is(tok::annot_template_id)) &&
+ TryAnnotateCXXScopeToken())
return TPResult::Error;
if (Tok.is(tok::annot_cxxscope))
ConsumeToken();
@@ -837,6 +839,7 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
Tok.is(tok::kw___stdcall) ||
Tok.is(tok::kw___fastcall) ||
Tok.is(tok::kw___thiscall) ||
+ Tok.is(tok::kw___vectorcall) ||
Tok.is(tok::kw___unaligned))
return TPResult::True; // attributes indicate declaration
TPResult TPR = TryParseDeclarator(mayBeAbstract, mayHaveIdentifier);
@@ -891,6 +894,7 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
case tok::numeric_constant:
case tok::char_constant:
case tok::wide_char_constant:
+ case tok::utf8_char_constant:
case tok::utf16_char_constant:
case tok::utf32_char_constant:
case tok::string_literal:
@@ -984,9 +988,11 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___vectorcall:
case tok::kw___unaligned:
case tok::kw___vector:
case tok::kw___pixel:
+ case tok::kw___bool:
case tok::kw__Atomic:
case tok::kw___unknown_anytype:
return TPResult::False;
@@ -1004,6 +1010,28 @@ bool Parser::isTentativelyDeclared(IdentifierInfo *II) {
!= TentativelyDeclaredIdentifiers.end();
}
+namespace {
+class TentativeParseCCC : public CorrectionCandidateCallback {
+public:
+ TentativeParseCCC(const Token &Next) {
+ WantRemainingKeywords = false;
+ WantTypeSpecifiers = Next.is(tok::l_paren) || Next.is(tok::r_paren) ||
+ Next.is(tok::greater) || Next.is(tok::l_brace) ||
+ Next.is(tok::identifier);
+ }
+
+ bool ValidateCandidate(const TypoCorrection &Candidate) override {
+ // Reject any candidate that only resolves to instance members since they
+ // aren't viable as standalone identifiers instead of member references.
+ if (Candidate.isResolved() && !Candidate.isKeyword() &&
+ std::all_of(Candidate.begin(), Candidate.end(),
+ [](NamedDecl *ND) { return ND->isCXXInstanceMember(); }))
+ return false;
+
+ return CorrectionCandidateCallback::ValidateCandidate(Candidate);
+ }
+};
+}
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a declaration
/// specifier, TPResult::False if it is not, TPResult::Ambiguous if it could
/// be either a decl-specifier or a function-style cast, and TPResult::Error
@@ -1129,11 +1157,8 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// a parse error one way or another. In that case, tell the caller that
// this is ambiguous. Typo-correct to type and expression keywords and
// to types and identifiers, in order to try to recover from errors.
- CorrectionCandidateCallback TypoCorrection;
- TypoCorrection.WantRemainingKeywords = false;
- TypoCorrection.WantTypeSpecifiers = Next.isNot(tok::arrow);
switch (TryAnnotateName(false /* no nested name specifier */,
- &TypoCorrection)) {
+ llvm::make_unique<TentativeParseCCC>(Next))) {
case ANK_Error:
return TPResult::Error;
case ANK_TentativeDecl:
@@ -1181,6 +1206,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
return TPResult::False;
}
// Fall through.
+ case tok::kw___super:
case tok::kw_decltype:
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
@@ -1250,6 +1276,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___vectorcall:
case tok::kw___w64:
case tok::kw___sptr:
case tok::kw___uptr:
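ParseTentative.cpp teaches the tentative parser about the __vectorcall calling convention, the __super scope keyword, UTF-8 character constants, and the AltiVec __bool keyword, and the new TentativeParseCCC filters typo-correction candidates that only resolve to instance members. Hypothetical Microsoft-mode declarations exercising two of the newly handled tokens, assuming -fms-extensions on an x86 target:

  struct Base { void f(); };
  struct Derived : Base {
    void g() { __super::f(); }            // __super now participates in scope annotation
  };
  int __vectorcall add(int a, int b) { return a + b; }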
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index 37ce157e0812..7ccd2092a2d7 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -510,6 +510,13 @@ namespace {
};
}
+void Parser::LateTemplateParserCleanupCallback(void *P) {
+ // While this RAII helper doesn't bracket any actual work, the destructor will
+ // clean up annotations that were created during ActOnEndOfTranslationUnit
+ // when incremental processing is enabled.
+ DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(((Parser *)P)->TemplateIds);
+}
+
/// ParseTopLevelDecl - Parse one top-level declaration, return whatever the
/// action tells us to. This returns true if the EOF was encountered.
bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
@@ -542,7 +549,10 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
case tok::eof:
// Late template parsing can begin.
if (getLangOpts().DelayedTemplateParsing)
- Actions.SetLateTemplateParser(LateTemplateParserCallback, this);
+ Actions.SetLateTemplateParser(LateTemplateParserCallback,
+ PP.isIncrementalProcessingEnabled() ?
+ LateTemplateParserCleanupCallback : nullptr,
+ this);
if (!PP.isIncrementalProcessingEnabled())
Actions.ActOnEndOfTranslationUnit();
//else don't tell Sema that we ended parsing: more input might come.
@@ -624,8 +634,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
HandlePragmaOpenCLExtension();
return DeclGroupPtrTy();
case tok::annot_pragma_openmp:
- ParseOpenMPDeclarativeDirective();
- return DeclGroupPtrTy();
+ return ParseOpenMPDeclarativeDirective();
case tok::annot_pragma_ms_pointers_to_members:
HandlePragmaMSPointersToMembers();
return DeclGroupPtrTy();
@@ -697,8 +706,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// A function definition cannot start with any of these keywords.
{
SourceLocation DeclEnd;
- StmtVector Stmts;
- return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
}
case tok::kw_static:
@@ -708,8 +716,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 0;
SourceLocation DeclEnd;
- StmtVector Stmts;
- return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
}
goto dont_know;
@@ -720,8 +727,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// Inline namespaces. Allowed as an extension even in C++03.
if (NextKind == tok::kw_namespace) {
SourceLocation DeclEnd;
- StmtVector Stmts;
- return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
}
// Parse (then ignore) 'inline' prior to a template instantiation. This is
@@ -730,8 +736,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 1;
SourceLocation DeclEnd;
- StmtVector Stmts;
- return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
}
}
goto dont_know;
@@ -885,7 +890,7 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- return ParseDeclGroup(DS, Declarator::FileContext, true);
+ return ParseDeclGroup(DS, Declarator::FileContext);
}
Parser::DeclGroupPtrTy
@@ -922,7 +927,7 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributesWithRange &attrs,
Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
LateParsedAttrList *LateParsedAttrs) {
- // Poison the SEH identifiers so they are flagged as illegal in function bodies
+ // Poison SEH identifiers so they are flagged as illegal in function bodies.
PoisonSEHIdentifiersRAIIObject PoisonSEHIdentifiers(*this, true);
const DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
@@ -1218,27 +1223,24 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
/// [GNU] asm-string-literal:
/// string-literal
///
-Parser::ExprResult Parser::ParseAsmStringLiteral() {
- switch (Tok.getKind()) {
- case tok::string_literal:
- break;
- case tok::utf8_string_literal:
- case tok::utf16_string_literal:
- case tok::utf32_string_literal:
- case tok::wide_string_literal: {
- SourceLocation L = Tok.getLocation();
+ExprResult Parser::ParseAsmStringLiteral() {
+ if (!isTokenStringLiteral()) {
+ Diag(Tok, diag::err_expected_string_literal)
+ << /*Source='in...'*/0 << "'asm'";
+ return ExprError();
+ }
+
+ ExprResult AsmString(ParseStringLiteralExpression());
+ if (!AsmString.isInvalid()) {
+ const auto *SL = cast<StringLiteral>(AsmString.get());
+ if (!SL->isAscii()) {
Diag(Tok, diag::err_asm_operand_wide_string_literal)
- << (Tok.getKind() == tok::wide_string_literal)
- << SourceRange(L, L);
+ << SL->isWide()
+ << SL->getSourceRange();
return ExprError();
}
- default:
- Diag(Tok, diag::err_expected_string_literal)
- << /*Source='in...'*/0 << "'asm'";
- return ExprError();
}
-
- return ParseStringLiteralExpression();
+ return AsmString;
}
/// ParseSimpleAsm
@@ -1246,7 +1248,7 @@ Parser::ExprResult Parser::ParseAsmStringLiteral() {
/// [GNU] simple-asm-expr:
/// 'asm' '(' asm-string-literal ')'
///
-Parser::ExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
+ExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
assert(Tok.is(tok::kw_asm) && "Not an asm!");
SourceLocation Loc = ConsumeToken();
@@ -1321,7 +1323,7 @@ void Parser::AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation) {
/// no typo correction will be performed.
Parser::AnnotatedNameKind
Parser::TryAnnotateName(bool IsAddressOfOperand,
- CorrectionCandidateCallback *CCC) {
+ std::unique_ptr<CorrectionCandidateCallback> CCC) {
assert(Tok.is(tok::identifier) || Tok.is(tok::annot_cxxscope));
const bool EnteringContext = false;
@@ -1359,9 +1361,9 @@ Parser::TryAnnotateName(bool IsAddressOfOperand,
// after a scope specifier, because in general we can't recover from typos
// there (eg, after correcting 'A::tempalte B<X>::C' [sic], we would need to
// jump back into scope specifier parsing).
- Sema::NameClassification Classification
- = Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next,
- IsAddressOfOperand, SS.isEmpty() ? CCC : nullptr);
+ Sema::NameClassification Classification = Actions.ClassifyName(
+ getCurScope(), SS, Name, NameLoc, Next, IsAddressOfOperand,
+ SS.isEmpty() ? std::move(CCC) : nullptr);
switch (Classification.getKind()) {
case Sema::NC_Error:
@@ -1431,34 +1433,16 @@ Parser::TryAnnotateName(bool IsAddressOfOperand,
}
bool Parser::TryKeywordIdentFallback(bool DisableKeyword) {
- assert(!Tok.is(tok::identifier) && !Tok.isAnnotation());
+ assert(Tok.isNot(tok::identifier));
Diag(Tok, diag::ext_keyword_as_ident)
<< PP.getSpelling(Tok)
<< DisableKeyword;
- if (DisableKeyword) {
- IdentifierInfo *II = Tok.getIdentifierInfo();
- ContextualKeywords[II] = Tok.getKind();
- II->RevertTokenIDToIdentifier();
- }
+ if (DisableKeyword)
+ Tok.getIdentifierInfo()->RevertTokenIDToIdentifier();
Tok.setKind(tok::identifier);
return true;
}
-bool Parser::TryIdentKeywordUpgrade() {
- assert(Tok.is(tok::identifier));
- const IdentifierInfo *II = Tok.getIdentifierInfo();
- assert(II->hasRevertedTokenIDToIdentifier());
- // If we find that this is in fact the name of a type trait,
- // update the token kind in place and parse again to treat it as
- // the appropriate kind of type trait.
- llvm::SmallDenseMap<const IdentifierInfo *, tok::TokenKind>::iterator Known =
- ContextualKeywords.find(II);
- if (Known == ContextualKeywords.end())
- return false;
- Tok.setKind(Known->second);
- return true;
-}
-
/// TryAnnotateTypeOrScopeToken - If the current token position is on a
/// typename (possibly qualified in C++) or a C++ scope specifier not followed
/// by a typename, TryAnnotateTypeOrScopeToken will replace one or more tokens
@@ -1482,10 +1466,11 @@ bool Parser::TryIdentKeywordUpgrade() {
/// Note that this routine emits an error if you call it with ::new or ::delete
/// as the current tokens, so only call it in contexts where these are invalid.
bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
- assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon)
- || Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope)
- || Tok.is(tok::kw_decltype) || Tok.is(tok::annot_template_id))
- && "Cannot be a type or scope token!");
+ assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
+ Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope) ||
+ Tok.is(tok::kw_decltype) || Tok.is(tok::annot_template_id) ||
+ Tok.is(tok::kw___super)) &&
+ "Cannot be a type or scope token!");
if (Tok.is(tok::kw_typename)) {
// MSVC lets you do stuff like:
@@ -1694,7 +1679,8 @@ bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
"Call sites of this function should be guarded by checking for C++");
assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
(Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) ||
- Tok.is(tok::kw_decltype)) && "Cannot be a type or scope token!");
+ Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super)) &&
+ "Cannot be a type or scope token!");
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
@@ -1739,7 +1725,8 @@ SourceLocation Parser::handleUnexpectedCodeCompletionToken() {
for (Scope *S = getCurScope(); S; S = S->getParent()) {
if (S->getFlags() & Scope::FnScope) {
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_RecoveryInFunction);
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ Sema::PCC_RecoveryInFunction);
cutOffParsing();
return PrevTokLocation;
}
@@ -1756,13 +1743,6 @@ SourceLocation Parser::handleUnexpectedCodeCompletionToken() {
return PrevTokLocation;
}
-// Anchor the Parser::FieldCallback vtable to this translation unit.
-// We use a spurious method instead of the destructor because
-// destroying FieldCallbacks can actually be slightly
-// performance-sensitive.
-void Parser::FieldCallback::_anchor() {
-}
-
// Code-completion pass-through functions
void Parser::CodeCompleteDirective(bool InConditional) {
@@ -1784,7 +1764,7 @@ void Parser::CodeCompletePreprocessorExpression() {
void Parser::CodeCompleteMacroArgument(IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned ArgumentIndex) {
- Actions.CodeCompletePreprocessorMacroArgument(getCurScope(), Macro, MacroInfo,
+ Actions.CodeCompletePreprocessorMacroArgument(getCurScope(), Macro, MacroInfo,
ArgumentIndex);
}
@@ -1806,8 +1786,9 @@ bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
}
// Parse nested-name-specifier.
- ParseOptionalCXXScopeSpecifier(Result.SS, ParsedType(),
- /*EnteringContext=*/false);
+ if (getLangOpts().CPlusPlus)
+ ParseOptionalCXXScopeSpecifier(Result.SS, ParsedType(),
+ /*EnteringContext=*/false);
// Check nested-name specifier.
if (Result.SS.isInvalid()) {
@@ -1828,7 +1809,7 @@ bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
// Check if the symbol exists.
switch (Actions.CheckMicrosoftIfExistsSymbol(getCurScope(), Result.KeywordLoc,
- Result.IsIfExists, Result.SS,
+ Result.IsIfExists, Result.SS,
Result.Name)) {
case Sema::IER_Exists:
Result.Behavior = Result.IsIfExists ? IEB_Parse : IEB_Skip;
diff --git a/lib/Parse/RAIIObjectsForParser.h b/lib/Parse/RAIIObjectsForParser.h
index 4f5f3ab81036..a0c9c1fb6c6f 100644
--- a/lib/Parse/RAIIObjectsForParser.h
+++ b/lib/Parse/RAIIObjectsForParser.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_PARSE_RAII_OBJECTS_FOR_PARSER_H
-#define LLVM_CLANG_PARSE_RAII_OBJECTS_FOR_PARSER_H
+#ifndef LLVM_CLANG_LIB_PARSE_RAIIOBJECTSFORPARSER_H
+#define LLVM_CLANG_LIB_PARSE_RAIIOBJECTSFORPARSER_H
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
diff --git a/lib/Rewrite/CMakeLists.txt b/lib/Rewrite/CMakeLists.txt
index 0c77536012aa..16550b1b710e 100644
--- a/lib/Rewrite/CMakeLists.txt
+++ b/lib/Rewrite/CMakeLists.txt
@@ -10,7 +10,6 @@ add_clang_library(clangRewrite
TokenRewriter.cpp
LINK_LIBS
- clangAST
clangBasic
clangLex
)
diff --git a/lib/Rewrite/RewriteRope.cpp b/lib/Rewrite/RewriteRope.cpp
index ef8abfcadc03..1c82ee4a67ec 100644
--- a/lib/Rewrite/RewriteRope.cpp
+++ b/lib/Rewrite/RewriteRope.cpp
@@ -788,18 +788,14 @@ RopePiece RewriteRope::MakeRopeString(const char *Start, const char *End) {
// Otherwise, this was a small request but we just don't have space for it
// Make a new chunk and share it with later allocations.
- if (AllocBuffer)
- AllocBuffer->dropRef();
-
unsigned AllocSize = offsetof(RopeRefCountString, Data) + AllocChunkSize;
- AllocBuffer = reinterpret_cast<RopeRefCountString *>(new char[AllocSize]);
- AllocBuffer->RefCount = 0;
- memcpy(AllocBuffer->Data, Start, Len);
+ RopeRefCountString *Res =
+ reinterpret_cast<RopeRefCountString *>(new char[AllocSize]);
+ Res->RefCount = 0;
+ memcpy(Res->Data, Start, Len);
+ AllocBuffer = Res;
AllocOffs = Len;
- // Start out the new allocation with a refcount of 1, since we have an
- // internal reference to it.
- AllocBuffer->addRef();
return RopePiece(AllocBuffer, 0, Len);
}
diff --git a/lib/Rewrite/Rewriter.cpp b/lib/Rewrite/Rewriter.cpp
index eab4ccfeadc3..60cdcf703172 100644
--- a/lib/Rewrite/Rewriter.cpp
+++ b/lib/Rewrite/Rewriter.cpp
@@ -13,9 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Core/Rewriter.h"
-#include "clang/AST/Decl.h"
-#include "clang/AST/PrettyPrinter.h"
-#include "clang/AST/Stmt.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticIDs.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
@@ -328,35 +326,6 @@ bool Rewriter::ReplaceText(SourceRange range, SourceRange replacementRange) {
return ReplaceText(start, origLength, MB.substr(newOffs, newLength));
}
-/// ReplaceStmt - This replaces a Stmt/Expr with another, using the pretty
-/// printer to generate the replacement code. This returns true if the input
-/// could not be rewritten, or false if successful.
-bool Rewriter::ReplaceStmt(Stmt *From, Stmt *To) {
- assert(From != nullptr && To != nullptr && "Expected non-null Stmt's");
-
- // Measure the old text.
- int Size = getRangeSize(From->getSourceRange());
- if (Size == -1)
- return true;
-
- // Get the new text.
- std::string SStr;
- llvm::raw_string_ostream S(SStr);
- To->printPretty(S, nullptr, PrintingPolicy(*LangOpts));
- const std::string &Str = S.str();
-
- ReplaceText(From->getLocStart(), Size, Str);
- return false;
-}
-
-std::string Rewriter::ConvertToString(Stmt *From) {
- assert(From != nullptr && "Expected non-null Stmt");
- std::string SStr;
- llvm::raw_string_ostream S(SStr);
- From->printPretty(S, nullptr, PrintingPolicy(*LangOpts));
- return S.str();
-}
-
bool Rewriter::IncreaseIndentation(CharSourceRange range,
SourceLocation parentIndent) {
if (range.isInvalid()) return true;
diff --git a/lib/Sema/AnalysisBasedWarnings.cpp b/lib/Sema/AnalysisBasedWarnings.cpp
index 213d6fbffffc..f666a9b46384 100644
--- a/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/lib/Sema/AnalysisBasedWarnings.cpp
@@ -530,44 +530,37 @@ static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
return;
- // FIXME: Function try block
- if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
- switch (CheckFallThrough(AC)) {
- case UnknownFallThrough:
- break;
+ SourceLocation LBrace = Body->getLocStart(), RBrace = Body->getLocEnd();
+ // Either in a function body compound statement, or a function-try-block.
+ switch (CheckFallThrough(AC)) {
+ case UnknownFallThrough:
+ break;
- case MaybeFallThrough:
- if (HasNoReturn)
- S.Diag(Compound->getRBracLoc(),
- CD.diag_MaybeFallThrough_HasNoReturn);
- else if (!ReturnsVoid)
- S.Diag(Compound->getRBracLoc(),
- CD.diag_MaybeFallThrough_ReturnsNonVoid);
- break;
- case AlwaysFallThrough:
- if (HasNoReturn)
- S.Diag(Compound->getRBracLoc(),
- CD.diag_AlwaysFallThrough_HasNoReturn);
- else if (!ReturnsVoid)
- S.Diag(Compound->getRBracLoc(),
- CD.diag_AlwaysFallThrough_ReturnsNonVoid);
- break;
- case NeverFallThroughOrReturn:
- if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
- << 0 << FD;
- } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
- S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
- << 1 << MD;
- } else {
- S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn);
- }
+ case MaybeFallThrough:
+ if (HasNoReturn)
+ S.Diag(RBrace, CD.diag_MaybeFallThrough_HasNoReturn);
+ else if (!ReturnsVoid)
+ S.Diag(RBrace, CD.diag_MaybeFallThrough_ReturnsNonVoid);
+ break;
+ case AlwaysFallThrough:
+ if (HasNoReturn)
+ S.Diag(RBrace, CD.diag_AlwaysFallThrough_HasNoReturn);
+ else if (!ReturnsVoid)
+ S.Diag(RBrace, CD.diag_AlwaysFallThrough_ReturnsNonVoid);
+ break;
+ case NeverFallThroughOrReturn:
+ if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 0 << FD;
+ } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 1 << MD;
+ } else {
+ S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn);
}
- break;
- case NeverFallThrough:
- break;
- }
+ }
+ break;
+ case NeverFallThrough:
+ break;
}
}
@@ -923,7 +916,7 @@ namespace {
// issue a warn_fallthrough_attr_unreachable for them.
for (const auto *B : *Cfg) {
const Stmt *L = B->getLabel();
- if (L && isa<SwitchCase>(L) && ReachableBlocks.insert(B))
+ if (L && isa<SwitchCase>(L) && ReachableBlocks.insert(B).second)
BlockQueue.push_back(B);
}
@@ -933,7 +926,7 @@ namespace {
for (CFGBlock::const_succ_iterator I = P->succ_begin(),
E = P->succ_end();
I != E; ++I) {
- if (*I && ReachableBlocks.insert(*I))
+ if (*I && ReachableBlocks.insert(*I).second)
BlockQueue.push_back(*I);
}
}
@@ -1444,13 +1437,51 @@ struct SortDiagBySourceLocation {
// -Wthread-safety
//===----------------------------------------------------------------------===//
namespace clang {
-namespace thread_safety {
-namespace {
-class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
+namespace threadSafety {
+
+class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
Sema &S;
DiagList Warnings;
SourceLocation FunLocation, FunEndLocation;
+ const FunctionDecl *CurrentFunction;
+ bool Verbose;
+
+ OptionalNotes getNotes() const {
+ if (Verbose && CurrentFunction) {
+ PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
+ S.PDiag(diag::note_thread_warning_in_fun)
+ << CurrentFunction->getNameAsString());
+ return OptionalNotes(1, FNote);
+ }
+ return OptionalNotes();
+ }
+
+ OptionalNotes getNotes(const PartialDiagnosticAt &Note) const {
+ OptionalNotes ONS(1, Note);
+ if (Verbose && CurrentFunction) {
+ PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
+ S.PDiag(diag::note_thread_warning_in_fun)
+ << CurrentFunction->getNameAsString());
+ ONS.push_back(FNote);
+ }
+ return ONS;
+ }
+
+ OptionalNotes getNotes(const PartialDiagnosticAt &Note1,
+ const PartialDiagnosticAt &Note2) const {
+ OptionalNotes ONS;
+ ONS.push_back(Note1);
+ ONS.push_back(Note2);
+ if (Verbose && CurrentFunction) {
+ PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
+ S.PDiag(diag::note_thread_warning_in_fun)
+ << CurrentFunction->getNameAsString());
+ ONS.push_back(FNote);
+ }
+ return ONS;
+ }
+
// Helper functions
void warnLockMismatch(unsigned DiagID, StringRef Kind, Name LockName,
SourceLocation Loc) {
@@ -1459,12 +1490,15 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
if (!Loc.isValid())
Loc = FunLocation;
PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << LockName);
- Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ Warnings.push_back(DelayedDiag(Warning, getNotes()));
}
public:
ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
- : S(S), FunLocation(FL), FunEndLocation(FEL) {}
+ : S(S), FunLocation(FL), FunEndLocation(FEL),
+ CurrentFunction(nullptr), Verbose(false) {}
+
+ void setVerbose(bool b) { Verbose = b; }
/// \brief Emit all buffered diagnostics in order of source location.
/// We need to output diagnostics produced while iterating through
@@ -1482,12 +1516,14 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
void handleInvalidLockExp(StringRef Kind, SourceLocation Loc) override {
PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_cannot_resolve_lock)
<< Loc);
- Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ Warnings.push_back(DelayedDiag(Warning, getNotes()));
}
+
void handleUnmatchedUnlock(StringRef Kind, Name LockName,
SourceLocation Loc) override {
warnLockMismatch(diag::warn_unlock_but_no_lock, Kind, LockName, Loc);
}
+
void handleIncorrectUnlockKind(StringRef Kind, Name LockName,
LockKind Expected, LockKind Received,
SourceLocation Loc) override {
@@ -1496,8 +1532,9 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_kind_mismatch)
<< Kind << LockName << Received
<< Expected);
- Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ Warnings.push_back(DelayedDiag(Warning, getNotes()));
}
+
void handleDoubleLock(StringRef Kind, Name LockName, SourceLocation Loc) override {
warnLockMismatch(diag::warn_double_lock, Kind, LockName, Loc);
}
@@ -1529,10 +1566,10 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
if (LocLocked.isValid()) {
PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here)
<< Kind);
- Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
+ Warnings.push_back(DelayedDiag(Warning, getNotes(Note)));
return;
}
- Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ Warnings.push_back(DelayedDiag(Warning, getNotes()));
}
void handleExclusiveAndShared(StringRef Kind, Name LockName,
@@ -1543,7 +1580,7 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
<< Kind << LockName);
PartialDiagnosticAt Note(Loc2, S.PDiag(diag::note_lock_exclusive_and_shared)
<< Kind << LockName);
- Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
+ Warnings.push_back(DelayedDiag(Warning, getNotes(Note)));
}
void handleNoMutexHeld(StringRef Kind, const NamedDecl *D,
@@ -1556,7 +1593,7 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
diag::warn_var_deref_requires_any_lock;
PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
<< D->getNameAsString() << getLockKindFromAccessKind(AK));
- Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ Warnings.push_back(DelayedDiag(Warning, getNotes()));
}
void handleMutexNotHeld(StringRef Kind, const NamedDecl *D,
@@ -1575,13 +1612,25 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
case POK_FunctionCall:
DiagID = diag::warn_fun_requires_lock_precise;
break;
+ case POK_PassByRef:
+ DiagID = diag::warn_guarded_pass_by_reference;
+ break;
+ case POK_PtPassByRef:
+ DiagID = diag::warn_pt_guarded_pass_by_reference;
+ break;
}
PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
<< D->getNameAsString()
<< LockName << LK);
PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match)
<< *PossibleMatch);
- Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
+ if (Verbose && POK == POK_VarAccess) {
+ PartialDiagnosticAt VNote(D->getLocation(),
+ S.PDiag(diag::note_guarded_by_declared_here)
+ << D->getNameAsString());
+ Warnings.push_back(DelayedDiag(Warning, getNotes(Note, VNote)));
+ } else
+ Warnings.push_back(DelayedDiag(Warning, getNotes(Note)));
} else {
switch (POK) {
case POK_VarAccess:
@@ -1593,22 +1642,52 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
case POK_FunctionCall:
DiagID = diag::warn_fun_requires_lock;
break;
+ case POK_PassByRef:
+ DiagID = diag::warn_guarded_pass_by_reference;
+ break;
+ case POK_PtPassByRef:
+ DiagID = diag::warn_pt_guarded_pass_by_reference;
+ break;
}
PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
<< D->getNameAsString()
<< LockName << LK);
- Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ if (Verbose && POK == POK_VarAccess) {
+ PartialDiagnosticAt Note(D->getLocation(),
+ S.PDiag(diag::note_guarded_by_declared_here)
+ << D->getNameAsString());
+ Warnings.push_back(DelayedDiag(Warning, getNotes(Note)));
+ } else
+ Warnings.push_back(DelayedDiag(Warning, getNotes()));
}
}
+
+ virtual void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg,
+ SourceLocation Loc) override {
+ PartialDiagnosticAt Warning(Loc,
+ S.PDiag(diag::warn_acquire_requires_negative_cap)
+ << Kind << LockName << Neg);
+ Warnings.push_back(DelayedDiag(Warning, getNotes()));
+ }
+
+
void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName,
SourceLocation Loc) override {
PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_fun_excludes_mutex)
<< Kind << FunName << LockName);
- Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ Warnings.push_back(DelayedDiag(Warning, getNotes()));
+ }
+
+ void enterFunction(const FunctionDecl* FD) override {
+ CurrentFunction = FD;
+ }
+
+ void leaveFunction(const FunctionDecl* FD) override {
+ CurrentFunction = 0;
}
};
-}
+
}
}
@@ -1896,11 +1975,13 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
if (P.enableThreadSafetyAnalysis) {
SourceLocation FL = AC.getDecl()->getLocation();
SourceLocation FEL = AC.getDecl()->getLocEnd();
- thread_safety::ThreadSafetyReporter Reporter(S, FL, FEL);
+ threadSafety::ThreadSafetyReporter Reporter(S, FL, FEL);
if (!Diags.isIgnored(diag::warn_thread_safety_beta, D->getLocStart()))
Reporter.setIssueBetaWarnings(true);
+ if (!Diags.isIgnored(diag::warn_thread_safety_verbose, D->getLocStart()))
+ Reporter.setVerbose(true);
- thread_safety::runThreadSafetyAnalysis(AC, Reporter);
+ threadSafety::runThreadSafetyAnalysis(AC, Reporter);
Reporter.emitDiagnostics();
}
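The hunk above wires the new POK_PassByRef/POK_PtPassByRef kinds and the verbose per-function notes into the -Wthread-safety reporter. A minimal user-level sketch of code that would trigger the new pass-by-reference warning follows; the Mutex wrapper and use() helper are invented for illustration, and it assumes compilation with -Wthread-safety.

struct __attribute__((capability("mutex"))) Mutex {
  void lock() __attribute__((acquire_capability()));
  void unlock() __attribute__((release_capability()));
};

void use(int &ref);  // lets the guarded value escape by reference

struct Counter {
  Mutex mu;
  int value __attribute__((guarded_by(mu)));

  void bad() {
    use(value);   // warn_guarded_pass_by_reference: 'mu' is not held
  }
  void good() {
    mu.lock();
    use(value);   // fine: 'mu' is held while the reference escapes
    mu.unlock();
  }
};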
diff --git a/lib/Sema/AttributeList.cpp b/lib/Sema/AttributeList.cpp
index 476a22b628ad..34af6cf63c87 100644
--- a/lib/Sema/AttributeList.cpp
+++ b/lib/Sema/AttributeList.cpp
@@ -206,3 +206,11 @@ bool AttributeList::isKnownToGCC() const {
unsigned AttributeList::getSemanticSpelling() const {
return getInfo(*this).SpellingIndexToSemanticSpelling(*this);
}
+
+bool AttributeList::hasVariadicArg() const {
+ // If the attribute has the maximum number of optional arguments, we will
+ // claim that as being variadic. If we someday get an attribute that
+ // legitimately bumps up against that maximum, we can use another bit to track
+ // whether it's truly variadic or not.
+ return getInfo(*this).OptArgs == 15;
+}
diff --git a/lib/Sema/CMakeLists.txt b/lib/Sema/CMakeLists.txt
index 7847d2c36e5b..4a772d8972a9 100644
--- a/lib/Sema/CMakeLists.txt
+++ b/lib/Sema/CMakeLists.txt
@@ -21,6 +21,7 @@ add_clang_library(clangSema
SemaChecking.cpp
SemaCodeComplete.cpp
SemaConsumer.cpp
+ SemaCUDA.cpp
SemaDecl.cpp
SemaDeclAttr.cpp
SemaDeclCXX.cpp
diff --git a/lib/Sema/DeclSpec.cpp b/lib/Sema/DeclSpec.cpp
index d7372b7a2768..7bf3e51999b6 100644
--- a/lib/Sema/DeclSpec.cpp
+++ b/lib/Sema/DeclSpec.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Parse/ParseDiagnostic.h" // FIXME: remove this back-dependency!
#include "clang/Sema/LocInfoType.h"
@@ -113,6 +114,18 @@ void CXXScopeSpec::MakeGlobal(ASTContext &Context,
"NestedNameSpecifierLoc range computation incorrect");
}
+void CXXScopeSpec::MakeSuper(ASTContext &Context, CXXRecordDecl *RD,
+ SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc) {
+ Builder.MakeSuper(Context, RD, SuperLoc, ColonColonLoc);
+
+ Range.setBegin(SuperLoc);
+ Range.setEnd(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
void CXXScopeSpec::MakeTrivial(ASTContext &Context,
NestedNameSpecifier *Qualifier, SourceRange R) {
Builder.MakeTrivial(Context, Qualifier, R);
@@ -159,6 +172,8 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
SourceLocation ConstQualifierLoc,
SourceLocation
VolatileQualifierLoc,
+ SourceLocation
+ RestrictQualifierLoc,
SourceLocation MutableLoc,
ExceptionSpecificationType
ESpecType,
@@ -167,6 +182,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
SourceRange *ExceptionRanges,
unsigned NumExceptions,
Expr *NoexceptExpr,
+ CachedTokens *ExceptionSpecTokens,
SourceLocation LocalRangeBegin,
SourceLocation LocalRangeEnd,
Declarator &TheDeclarator,
@@ -193,6 +209,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
I.Fun.RefQualifierLoc = RefQualifierLoc.getRawEncoding();
I.Fun.ConstQualifierLoc = ConstQualifierLoc.getRawEncoding();
I.Fun.VolatileQualifierLoc = VolatileQualifierLoc.getRawEncoding();
+ I.Fun.RestrictQualifierLoc = RestrictQualifierLoc.getRawEncoding();
I.Fun.MutableLoc = MutableLoc.getRawEncoding();
I.Fun.ExceptionSpecType = ESpecType;
I.Fun.ExceptionSpecLoc = ESpecLoc.getRawEncoding();
@@ -203,6 +220,9 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
TrailingReturnType.isInvalid();
I.Fun.TrailingReturnType = TrailingReturnType.get();
+ assert(I.Fun.TypeQuals == TypeQuals && "bitfield overflow");
+ assert(I.Fun.ExceptionSpecType == ESpecType && "bitfield overflow");
+
// new[] a parameter array if needed.
if (NumParams) {
// If the 'InlineParams' in Declarator is unused and big enough, put our
@@ -239,6 +259,10 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
case EST_ComputedNoexcept:
I.Fun.NoexceptExpr = NoexceptExpr;
break;
+
+ case EST_Unparsed:
+ I.Fun.ExceptionSpecTokens = ExceptionSpecTokens;
+ break;
}
return I;
}
@@ -484,14 +508,14 @@ bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
case SCS_private_extern:
case SCS_static:
if (S.getLangOpts().OpenCLVersion < 120) {
- DiagID = diag::err_not_opencl_storage_class_specifier;
+ DiagID = diag::err_opencl_unknown_type_specifier;
PrevSpec = getSpecifierName(SC);
return true;
}
break;
case SCS_auto:
case SCS_register:
- DiagID = diag::err_not_opencl_storage_class_specifier;
+ DiagID = diag::err_opencl_unknown_type_specifier;
PrevSpec = getSpecifierName(SC);
return true;
default:
@@ -553,12 +577,6 @@ bool DeclSpec::SetTypeSpecWidth(TSW W, SourceLocation Loc,
else if (W != TSW_longlong || TypeSpecWidth != TSW_long)
return BadSpecifier(W, (TSW)TypeSpecWidth, PrevSpec, DiagID);
TypeSpecWidth = W;
- if (TypeAltiVecVector && !TypeAltiVecBool &&
- ((TypeSpecWidth == TSW_long) || (TypeSpecWidth == TSW_longlong))) {
- PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
- DiagID = diag::warn_vector_long_decl_spec_combination;
- return true;
- }
return false;
}
@@ -680,11 +698,6 @@ bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
}
TypeSpecType = T;
TypeSpecOwned = false;
- if (TypeAltiVecVector && !TypeAltiVecBool && (TypeSpecType == TST_double)) {
- PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
- DiagID = diag::err_invalid_vector_decl_spec;
- return true;
- }
return false;
}
@@ -978,6 +991,16 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) ||
(TypeSpecWidth != TSW_unspecified))
TypeSpecSign = TSS_unsigned;
+ } else if (TypeSpecType == TST_double) {
+ // vector long double and vector long long double are never allowed.
+ // vector double is OK for Power7 and later.
+ if (TypeSpecWidth == TSW_long || TypeSpecWidth == TSW_longlong)
+ Diag(D, TSWLoc, diag::err_invalid_vector_long_double_decl_spec);
+ else if (!PP.getTargetInfo().hasFeature("vsx"))
+ Diag(D, TSTLoc, diag::err_invalid_vector_double_decl_spec);
+ } else if (TypeSpecWidth == TSW_long) {
+ Diag(D, TSWLoc, diag::warn_vector_long_decl_spec_combination)
+ << getSpecifierName((TST)TypeSpecType, Policy);
}
if (TypeAltiVecPixel) {
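With the checks moved into DeclSpec::Finish as above, the AltiVec width/type combinations come out roughly as sketched below. This is only an illustration and assumes a PowerPC target with -maltivec (plus VSX for the double case).

__vector int    vi;  // accepted with AltiVec
__vector long   vl;  // warn_vector_long_decl_spec_combination (deprecated form)
__vector double vd;  // accepted only if the target reports the "vsx" feature
// __vector long double vld;  // err_invalid_vector_long_double_decl_spec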
diff --git a/lib/Sema/IdentifierResolver.cpp b/lib/Sema/IdentifierResolver.cpp
index 2a5bacff0d93..6586fb32787c 100644
--- a/lib/Sema/IdentifierResolver.cpp
+++ b/lib/Sema/IdentifierResolver.cpp
@@ -130,6 +130,9 @@ bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx, Scope *S,
return false;
}
+ // FIXME: If D is a local extern declaration, this check doesn't make sense;
+ // we should be checking its lexical context instead in that case, because
+ // that is its scope.
DeclContext *DCtx = D->getDeclContext()->getRedeclContext();
return AllowInlineNamespace ? Ctx->InEnclosingNamespaceSetOf(DCtx)
: Ctx->Equals(DCtx);
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
index 255845282dfa..fd75c02bb1ed 100644
--- a/lib/Sema/JumpDiagnostics.cpp
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -84,6 +84,7 @@ private:
void CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
unsigned JumpDiag, unsigned JumpDiagWarning,
unsigned JumpDiagCXX98Compat);
+ void CheckGotoStmt(GotoStmt *GS);
unsigned GetDeepestCommonScope(unsigned A, unsigned B);
};
@@ -489,10 +490,14 @@ void JumpScopeChecker::VerifyJumps() {
// With a goto,
if (GotoStmt *GS = dyn_cast<GotoStmt>(Jump)) {
- CheckJump(GS, GS->getLabel()->getStmt(), GS->getGotoLoc(),
- diag::err_goto_into_protected_scope,
- diag::ext_goto_into_protected_scope,
- diag::warn_cxx98_compat_goto_into_protected_scope);
+ // The label may not have a statement if it's coming from inline MS ASM.
+ if (GS->getLabel()->getStmt()) {
+ CheckJump(GS, GS->getLabel()->getStmt(), GS->getGotoLoc(),
+ diag::err_goto_into_protected_scope,
+ diag::ext_goto_into_protected_scope,
+ diag::warn_cxx98_compat_goto_into_protected_scope);
+ }
+ CheckGotoStmt(GS);
continue;
}
@@ -789,6 +794,15 @@ void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
}
}
+void JumpScopeChecker::CheckGotoStmt(GotoStmt *GS) {
+ if (GS->getLabel()->isMSAsmLabel()) {
+ S.Diag(GS->getGotoLoc(), diag::err_goto_ms_asm_label)
+ << GS->getLabel()->getIdentifier();
+ S.Diag(GS->getLabel()->getLocation(), diag::note_goto_ms_asm_label)
+ << GS->getLabel()->getIdentifier();
+ }
+}
+
void Sema::DiagnoseInvalidJumps(Stmt *Body) {
(void)JumpScopeChecker(Body, *this);
}
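CheckGotoStmt above targets 'goto' statements whose label was defined inside Microsoft-style inline assembly. A hedged sketch of the rejected construct, assuming an x86 target with -fms-extensions/-fasm-blocks:

void jump_into_asm() {
  __asm {
    mov eax, 0
  spin:              // label declared inside the asm block
    inc eax
  }
  goto spin;         // err_goto_ms_asm_label, with note_goto_ms_asm_label on 'spin'
}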
diff --git a/lib/Sema/MultiplexExternalSemaSource.cpp b/lib/Sema/MultiplexExternalSemaSource.cpp
index 97237dbf097d..449ddf43114d 100644
--- a/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -242,6 +242,12 @@ void MultiplexExternalSemaSource::ReadDynamicClasses(
Sources[i]->ReadDynamicClasses(Decls);
}
+void MultiplexExternalSemaSource::ReadUnusedLocalTypedefNameCandidates(
+ llvm::SmallSetVector<const TypedefNameDecl *, 4> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadUnusedLocalTypedefNameCandidates(Decls);
+}
+
void MultiplexExternalSemaSource::ReadLocallyScopedExternCDecls(
SmallVectorImpl<NamedDecl*> &Decls) {
for(size_t i = 0; i < Sources.size(); ++i)
diff --git a/lib/Sema/Scope.cpp b/lib/Sema/Scope.cpp
index 35e2075b3826..6c7977882386 100644
--- a/lib/Sema/Scope.cpp
+++ b/lib/Sema/Scope.cpp
@@ -39,9 +39,6 @@ void Scope::Init(Scope *parent, unsigned flags) {
BlockParent = parent->BlockParent;
TemplateParamParent = parent->TemplateParamParent;
MSLocalManglingParent = parent->MSLocalManglingParent;
- SEHTryParent = parent->SEHTryParent;
- if (parent->Flags & SEHTryScope)
- SEHTryParent = parent;
if ((Flags & (FnScope | ClassScope | BlockScope | TemplateParamScope |
FunctionPrototypeScope | AtCatchScope | ObjCMethodScope)) ==
0)
@@ -50,17 +47,13 @@ void Scope::Init(Scope *parent, unsigned flags) {
Depth = 0;
PrototypeDepth = 0;
PrototypeIndex = 0;
- SEHTryParent = MSLocalManglingParent = FnParent = BlockParent = nullptr;
+ MSLocalManglingParent = FnParent = BlockParent = nullptr;
TemplateParamParent = nullptr;
MSLocalManglingNumber = 1;
}
// If this scope is a function or contains breaks/continues, remember it.
if (flags & FnScope) FnParent = this;
- SEHTryIndexPool = 0;
- SEHTryIndex = -1;
- if (flags & SEHTryScope)
- SEHTryIndex = FnParent ? FnParent->SEHTryIndexPool++ : -1;
// The MS mangler uses the number of scopes that can hold declarations as
// part of an external name.
if (Flags & (ClassScope | FnScope)) {
diff --git a/lib/Sema/ScopeInfo.cpp b/lib/Sema/ScopeInfo.cpp
index 4d079e705f62..63ef3b2355fb 100644
--- a/lib/Sema/ScopeInfo.cpp
+++ b/lib/Sema/ScopeInfo.cpp
@@ -14,6 +14,7 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
@@ -38,6 +39,7 @@ void FunctionScopeInfo::Clear() {
ErrorTrap.reset();
PossiblyUnreachableDiags.clear();
WeakObjectUses.clear();
+ ModifiedNonNullParams.clear();
}
static const NamedDecl *getBestPropertyDecl(const ObjCPropertyRefExpr *PropE) {
@@ -93,6 +95,21 @@ FunctionScopeInfo::WeakObjectProfileTy::getBaseInfo(const Expr *E) {
return BaseInfoTy(D, IsExact);
}
+bool CapturingScopeInfo::isVLATypeCaptured(const VariableArrayType *VAT) const {
+ RecordDecl *RD = nullptr;
+ if (auto *LSI = dyn_cast<LambdaScopeInfo>(this))
+ RD = LSI->Lambda;
+ else if (auto CRSI = dyn_cast<CapturedRegionScopeInfo>(this))
+ RD = CRSI->TheRecordDecl;
+
+ if (RD)
+ for (auto *FD : RD->fields()) {
+ if (FD->hasCapturedVLAType() && FD->getCapturedVLAType() == VAT)
+ return true;
+ }
+ return false;
+}
+
FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(
const ObjCPropertyRefExpr *PropE)
: Base(nullptr, true), Property(getBestPropertyDecl(PropE)) {
@@ -159,6 +176,8 @@ void FunctionScopeInfo::markSafeWeakUse(const Expr *E) {
// Has this weak object been seen before?
FunctionScopeInfo::WeakObjectUseMap::iterator Uses;
if (const ObjCPropertyRefExpr *RefExpr = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ if (!RefExpr->isObjectReceiver())
+ return;
if (isa<OpaqueValueExpr>(RefExpr->getBase()))
Uses = WeakObjectUses.find(WeakObjectProfileTy(RefExpr));
else {
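isVLATypeCaptured above reports whether a variably-modified type has already been captured as a hidden field of a lambda or captured-region record. A rough sketch of the kind of code that exercises it; VLAs in C++ are a GNU extension, so assume something like -std=gnu++11, and the exact set of capturable cases may differ.

void vla_capture(int n) {
  int arr[n];                               // variable-length array (GNU extension)
  auto l = [&arr] { return sizeof(arr); };  // capturing uses the VLA type, and so its bound 'n'
  (void)l();
}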
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
index 2c653325b37b..b9aaf1616d68 100644
--- a/lib/Sema/Sema.cpp
+++ b/lib/Sema/Sema.cpp
@@ -69,8 +69,6 @@ PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
void Sema::ActOnTranslationUnitScope(Scope *S) {
TUScope = S;
PushDeclContext(S, Context.getTranslationUnitDecl());
-
- VAListTagName = PP.getIdentifierInfo("__va_list_tag");
}
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
@@ -90,12 +88,14 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
CodeSegStack(nullptr), CurInitSeg(nullptr), VisContext(nullptr),
IsBuildingRecoveryCallExpr(false),
ExprNeedsCleanups(false), LateTemplateParser(nullptr),
+ LateTemplateParserCleanup(nullptr),
OpaqueParser(nullptr), IdResolver(pp), StdInitializerList(nullptr),
CXXTypeInfoDecl(nullptr), MSVCGuidDecl(nullptr),
NSNumberDecl(nullptr),
NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr),
NSArrayDecl(nullptr), ArrayWithObjectsMethod(nullptr),
NSDictionaryDecl(nullptr), DictionaryWithObjectsMethod(nullptr),
+ MSAsmLabelNameCounter(0),
GlobalNewDeleteDeclared(false),
TUKind(TUKind),
NumSFINAEErrors(0),
@@ -151,6 +151,10 @@ void Sema::Initialize() {
= dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
ExternalSema->InitializeSema(*this);
+ // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
+ // will not be able to merge any duplicate __va_list_tag decls correctly.
+ VAListTagName = PP.getIdentifierInfo("__va_list_tag");
+
// Initialize predefined 128-bit integer types, if needed.
if (Context.getTargetInfo().hasInt128Type()) {
// If either of the 128-bit integer types are unavailable to name lookup,
@@ -241,6 +245,8 @@ Sema::~Sema() {
// Destroys data sharing attributes stack for OpenMP
DestroyDataSharingAttributesStack();
+
+ assert(DelayedTypos.empty() && "Uncorrected typos!");
}
/// makeUnavailableInSystemHeader - There is an error in the current
@@ -538,7 +544,12 @@ static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
Complete = M->isDefined() || (M->isPure() && !isa<CXXDestructorDecl>(M));
else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
- Complete = F->getTemplatedDecl()->isDefined();
+ // If the template function is marked as late template parsed at this point,
+ // it has not been instantiated and therefore we have not performed semantic
+ // analysis on it yet, so we cannot know if the type can be considered
+ // complete.
+ Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
+ F->getTemplatedDecl()->isDefined();
else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
if (R->isInjectedClassName())
continue;
@@ -591,6 +602,19 @@ static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
return Complete;
}
+void Sema::emitAndClearUnusedLocalTypedefWarnings() {
+ if (ExternalSource)
+ ExternalSource->ReadUnusedLocalTypedefNameCandidates(
+ UnusedLocalTypedefNameCandidates);
+ for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
+ if (TD->isReferenced())
+ continue;
+ Diag(TD->getLocation(), diag::warn_unused_local_typedef)
+ << isa<TypeAliasDecl>(TD) << TD->getDeclName();
+ }
+ UnusedLocalTypedefNameCandidates.clear();
+}
+
/// ActOnEndOfTranslationUnit - This is called at the very end of the
/// translation unit when EOF is reached and all but the top-level scope is
/// popped.
@@ -647,13 +671,16 @@ void Sema::ActOnEndOfTranslationUnit() {
}
PerformPendingInstantiations();
+ if (LateTemplateParserCleanup)
+ LateTemplateParserCleanup(OpaqueParser);
+
CheckDelayedMemberExceptionSpecs();
}
// All delayed member exception specs should be checked or we end up accepting
// incompatible declarations.
assert(DelayedDefaultedMemberExceptionSpecs.empty());
- assert(DelayedDestructorExceptionSpecChecks.empty());
+ assert(DelayedExceptionSpecChecks.empty());
// Remove file scoped decls that turned out to be used.
UnusedFileScopedDecls.erase(
@@ -713,6 +740,10 @@ void Sema::ActOnEndOfTranslationUnit() {
}
}
+ // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
+ // modules when they are built, not every time they are used.
+ emitAndClearUnusedLocalTypedefWarnings();
+
// Modules don't need any of the checking below.
TUScope = nullptr;
return;
@@ -740,7 +771,7 @@ void Sema::ActOnEndOfTranslationUnit() {
// If the tentative definition was completed, getActingDefinition() returns
// null. If we've already seen this variable before, insert()'s second
// return value is false.
- if (!VD || VD->isInvalidDecl() || !Seen.insert(VD))
+ if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
continue;
if (const IncompleteArrayType *ArrayT
@@ -788,8 +819,9 @@ void Sema::ActOnEndOfTranslationUnit() {
!FD->isInlineSpecified() &&
!SourceMgr.isInMainFile(
SourceMgr.getExpansionLoc(FD->getLocation())))
- Diag(DiagD->getLocation(), diag::warn_unneeded_static_internal_decl)
- << DiagD->getDeclName();
+ Diag(DiagD->getLocation(),
+ diag::warn_unneeded_static_internal_decl)
+ << DiagD->getDeclName();
else
Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
<< /*function*/0 << DiagD->getDeclName();
@@ -820,6 +852,8 @@ void Sema::ActOnEndOfTranslationUnit() {
if (ExternalSource)
ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
checkUndefinedButUsed(*this);
+
+ emitAndClearUnusedLocalTypedefWarnings();
}
if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
@@ -1432,8 +1466,8 @@ IdentifierInfo *Sema::getFloat128Identifier() const {
void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
CapturedRegionKind K) {
- CapturingScopeInfo *CSI = new CapturedRegionScopeInfo(getDiagnostics(), S, CD, RD,
- CD->getContextParam(), K);
+ CapturingScopeInfo *CSI = new CapturedRegionScopeInfo(
+ getDiagnostics(), S, CD, RD, CD->getContextParam(), K);
CSI->ReturnType = Context.VoidTy;
FunctionScopes.push_back(CSI);
}
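emitAndClearUnusedLocalTypedefWarnings above drains the candidates collected during the translation unit (and any read from the external source) and reports the ones that were never referenced. Illustrative user code for the diagnostic, assuming -Wunused-local-typedef is enabled:

void local_typedefs() {
  typedef int unused_t;       // warn_unused_local_typedef (typedef form)
  using also_unused_t = int;  // same warning, reported as an alias declaration
  typedef int used_t;
  used_t x = 0;               // referenced, so no warning for 'used_t'
  (void)x;
}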
diff --git a/lib/Sema/SemaAccess.cpp b/lib/Sema/SemaAccess.cpp
index ffdb0aa27a6b..37240c23b691 100644
--- a/lib/Sema/SemaAccess.cpp
+++ b/lib/Sema/SemaAccess.cpp
@@ -1749,14 +1749,14 @@ Sema::AccessResult Sema::CheckFriendAccess(NamedDecl *target) {
return AR_accessible;
CXXMethodDecl *method = cast<CXXMethodDecl>(target->getAsFunction());
- assert(method->getQualifier());
AccessTarget entity(Context, AccessTarget::Member,
cast<CXXRecordDecl>(target->getDeclContext()),
DeclAccessPair::make(target, access),
/*no instance context*/ QualType());
entity.setDiag(diag::err_access_friend_function)
- << method->getQualifierLoc().getSourceRange();
+ << (method->getQualifier() ? method->getQualifierLoc().getSourceRange()
+ : method->getNameInfo().getSourceRange());
// We need to bypass delayed-diagnostics because we might be called
// while the ParsingDeclarator is active.
diff --git a/lib/Sema/SemaAttr.cpp b/lib/Sema/SemaAttr.cpp
index a7d606d545ca..76297977ea0e 100644
--- a/lib/Sema/SemaAttr.cpp
+++ b/lib/Sema/SemaAttr.cpp
@@ -360,18 +360,18 @@ void Sema::PragmaStack<ValueType>::Act(SourceLocation PragmaLocation,
}
}
-bool Sema::UnifySection(const StringRef &SectionName,
+bool Sema::UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *Decl) {
- auto Section = SectionInfos.find(SectionName);
- if (Section == SectionInfos.end()) {
- SectionInfos[SectionName] =
- SectionInfo(Decl, SourceLocation(), SectionFlags);
+ auto Section = Context.SectionInfos.find(SectionName);
+ if (Section == Context.SectionInfos.end()) {
+ Context.SectionInfos[SectionName] =
+ ASTContext::SectionInfo(Decl, SourceLocation(), SectionFlags);
return false;
}
// A pre-declared section takes precedence w/o diagnostic.
if (Section->second.SectionFlags == SectionFlags ||
- !(Section->second.SectionFlags & PSF_Implicit))
+ !(Section->second.SectionFlags & ASTContext::PSF_Implicit))
return false;
auto OtherDecl = Section->second.Decl;
Diag(Decl->getLocation(), diag::err_section_conflict)
@@ -384,17 +384,17 @@ bool Sema::UnifySection(const StringRef &SectionName,
if (auto A = OtherDecl->getAttr<SectionAttr>())
if (A->isImplicit())
Diag(A->getLocation(), diag::note_pragma_entered_here);
- return false;
+ return true;
}
-bool Sema::UnifySection(const StringRef &SectionName,
+bool Sema::UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation) {
- auto Section = SectionInfos.find(SectionName);
- if (Section != SectionInfos.end()) {
+ auto Section = Context.SectionInfos.find(SectionName);
+ if (Section != Context.SectionInfos.end()) {
if (Section->second.SectionFlags == SectionFlags)
return false;
- if (!(Section->second.SectionFlags & PSF_Implicit)) {
+ if (!(Section->second.SectionFlags & ASTContext::PSF_Implicit)) {
Diag(PragmaSectionLocation, diag::err_section_conflict)
<< "this" << "a prior #pragma section";
Diag(Section->second.PragmaSectionLocation,
@@ -402,8 +402,8 @@ bool Sema::UnifySection(const StringRef &SectionName,
return true;
}
}
- SectionInfos[SectionName] =
- SectionInfo(nullptr, PragmaSectionLocation, SectionFlags);
+ Context.SectionInfos[SectionName] =
+ ASTContext::SectionInfo(nullptr, PragmaSectionLocation, SectionFlags);
return false;
}
diff --git a/lib/Sema/SemaCUDA.cpp b/lib/Sema/SemaCUDA.cpp
new file mode 100644
index 000000000000..64222fbf8ac8
--- /dev/null
+++ b/lib/Sema/SemaCUDA.cpp
@@ -0,0 +1,263 @@
+//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// \brief This file implements semantic analysis for CUDA constructs.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace clang;
+
+ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
+ MultiExprArg ExecConfig,
+ SourceLocation GGGLoc) {
+ FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
+ if (!ConfigDecl)
+ return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
+ << "cudaConfigureCall");
+ QualType ConfigQTy = ConfigDecl->getType();
+
+ DeclRefExpr *ConfigDR = new (Context)
+ DeclRefExpr(ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
+ MarkFunctionReferenced(LLLLoc, ConfigDecl);
+
+ return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
+ /*IsExecConfig=*/true);
+}
+
+/// IdentifyCUDATarget - Determine the CUDA compilation target for this function
+Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) {
+ if (D->hasAttr<CUDAInvalidTargetAttr>())
+ return CFT_InvalidTarget;
+
+ if (D->hasAttr<CUDAGlobalAttr>())
+ return CFT_Global;
+
+ if (D->hasAttr<CUDADeviceAttr>()) {
+ if (D->hasAttr<CUDAHostAttr>())
+ return CFT_HostDevice;
+ return CFT_Device;
+ } else if (D->hasAttr<CUDAHostAttr>()) {
+ return CFT_Host;
+ } else if (D->isImplicit()) {
+ // Some implicit declarations (like intrinsic functions) are not marked.
+ // Set the most lenient target on them for maximal flexibility.
+ return CFT_HostDevice;
+ }
+
+ return CFT_Host;
+}
+
+bool Sema::CheckCUDATarget(const FunctionDecl *Caller,
+ const FunctionDecl *Callee) {
+ CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller),
+ CalleeTarget = IdentifyCUDATarget(Callee);
+
+ // If one of the targets is invalid, the check always fails, no matter what
+ // the other target is.
+ if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
+ return true;
+
+ // CUDA B.1.1 "The __device__ qualifier declares a function that is [...]
+ // Callable from the device only."
+ if (CallerTarget == CFT_Host && CalleeTarget == CFT_Device)
+ return true;
+
+ // CUDA B.1.2 "The __global__ qualifier declares a function that is [...]
+ // Callable from the host only."
+ // CUDA B.1.3 "The __host__ qualifier declares a function that is [...]
+ // Callable from the host only."
+ if ((CallerTarget == CFT_Device || CallerTarget == CFT_Global) &&
+ (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global))
+ return true;
+
+ // CUDA B.1.3 "The __device__ and __host__ qualifiers can be used together
+ // however, in which case the function is compiled for both the host and the
+ // device. The __CUDA_ARCH__ macro [...] can be used to differentiate code
+ // paths between host and device."
+ if (CallerTarget == CFT_HostDevice && CalleeTarget != CFT_HostDevice) {
+ // If the caller is implicit then the check always passes.
+ if (Caller->isImplicit()) return false;
+
+ bool InDeviceMode = getLangOpts().CUDAIsDevice;
+ if ((InDeviceMode && CalleeTarget != CFT_Device) ||
+ (!InDeviceMode && CalleeTarget != CFT_Host))
+ return true;
+ }
+
+ return false;
+}
+
+/// When an implicitly-declared special member has to invoke more than one
+/// base/field special member, conflicts may occur in the targets of these
+/// members. For example, if one base's member is __host__ and another's is
+/// __device__, it's a conflict.
+/// This function figures out if the given targets \param Target1 and
+/// \param Target2 conflict, and if they do not it fills in
+/// \param ResolvedTarget with a target that resolves for both calls.
+/// \return true if there's a conflict, false otherwise.
+static bool
+resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
+ Sema::CUDAFunctionTarget Target2,
+ Sema::CUDAFunctionTarget *ResolvedTarget) {
+ if (Target1 == Sema::CFT_Global && Target2 == Sema::CFT_Global) {
+ // TODO: this shouldn't happen, really. Methods cannot be marked __global__.
+ // Clang should detect this earlier and produce an error. Then this
+ // condition can be changed to an assertion.
+ return true;
+ }
+
+ if (Target1 == Sema::CFT_HostDevice) {
+ *ResolvedTarget = Target2;
+ } else if (Target2 == Sema::CFT_HostDevice) {
+ *ResolvedTarget = Target1;
+ } else if (Target1 != Target2) {
+ return true;
+ } else {
+ *ResolvedTarget = Target1;
+ }
+
+ return false;
+}
+
+bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
+ CXXSpecialMember CSM,
+ CXXMethodDecl *MemberDecl,
+ bool ConstRHS,
+ bool Diagnose) {
+ llvm::Optional<CUDAFunctionTarget> InferredTarget;
+
+ // We're going to invoke special member lookup; mark that these special
+ // members are called from this one, and not from its caller.
+ ContextRAII MethodContext(*this, MemberDecl);
+
+ // Look for special members in base classes that should be invoked from here.
+  // Infer the target of this member based on the ones it should call.
+ // Skip direct and indirect virtual bases for abstract classes.
+ llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases;
+ for (const auto &B : ClassDecl->bases()) {
+ if (!B.isVirtual()) {
+ Bases.push_back(&B);
+ }
+ }
+
+ if (!ClassDecl->isAbstract()) {
+ for (const auto &VB : ClassDecl->vbases()) {
+ Bases.push_back(&VB);
+ }
+ }
+
+ for (const auto *B : Bases) {
+ const RecordType *BaseType = B->getType()->getAs<RecordType>();
+ if (!BaseType) {
+ continue;
+ }
+
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ Sema::SpecialMemberOverloadResult *SMOR =
+ LookupSpecialMember(BaseClassDecl, CSM,
+ /* ConstArg */ ConstRHS,
+ /* VolatileArg */ false,
+ /* RValueThis */ false,
+ /* ConstThis */ false,
+ /* VolatileThis */ false);
+
+ if (!SMOR || !SMOR->getMethod()) {
+ continue;
+ }
+
+ CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR->getMethod());
+ if (!InferredTarget.hasValue()) {
+ InferredTarget = BaseMethodTarget;
+ } else {
+ bool ResolutionError = resolveCalleeCUDATargetConflict(
+ InferredTarget.getValue(), BaseMethodTarget,
+ InferredTarget.getPointer());
+ if (ResolutionError) {
+ if (Diagnose) {
+ Diag(ClassDecl->getLocation(),
+ diag::note_implicit_member_target_infer_collision)
+ << (unsigned)CSM << InferredTarget.getValue() << BaseMethodTarget;
+ }
+ MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
+ return true;
+ }
+ }
+ }
+
+ // Same as for bases, but now for special members of fields.
+ for (const auto *F : ClassDecl->fields()) {
+ if (F->isInvalidDecl()) {
+ continue;
+ }
+
+ const RecordType *FieldType =
+ Context.getBaseElementType(F->getType())->getAs<RecordType>();
+ if (!FieldType) {
+ continue;
+ }
+
+ CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
+ Sema::SpecialMemberOverloadResult *SMOR =
+ LookupSpecialMember(FieldRecDecl, CSM,
+ /* ConstArg */ ConstRHS && !F->isMutable(),
+ /* VolatileArg */ false,
+ /* RValueThis */ false,
+ /* ConstThis */ false,
+ /* VolatileThis */ false);
+
+ if (!SMOR || !SMOR->getMethod()) {
+ continue;
+ }
+
+ CUDAFunctionTarget FieldMethodTarget =
+ IdentifyCUDATarget(SMOR->getMethod());
+ if (!InferredTarget.hasValue()) {
+ InferredTarget = FieldMethodTarget;
+ } else {
+ bool ResolutionError = resolveCalleeCUDATargetConflict(
+ InferredTarget.getValue(), FieldMethodTarget,
+ InferredTarget.getPointer());
+ if (ResolutionError) {
+ if (Diagnose) {
+ Diag(ClassDecl->getLocation(),
+ diag::note_implicit_member_target_infer_collision)
+ << (unsigned)CSM << InferredTarget.getValue()
+ << FieldMethodTarget;
+ }
+ MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
+ return true;
+ }
+ }
+ }
+
+ if (InferredTarget.hasValue()) {
+ if (InferredTarget.getValue() == CFT_Device) {
+ MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ } else if (InferredTarget.getValue() == CFT_Host) {
+ MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ } else {
+ MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ }
+ } else {
+ // If no target was inferred, mark this member as __host__ __device__;
+ // it's the least restrictive option that can be invoked from any target.
+ MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ }
+
+ return false;
+}
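As a rough summary of what CheckCUDATarget above accepts and rejects, the following CUDA user code (function names invented, CUDA compilation assumed) hits each rule in turn:

__device__ void dev_fn();
__host__   void host_fn();
__global__ void kernel();

__host__ void from_host() {
  dev_fn();            // rejected: __host__ cannot call __device__ (B.1.1)
  kernel<<<1, 1>>>();  // fine: kernels are launched from the host
}

__device__ void from_device() {
  host_fn();           // rejected: __device__ cannot call __host__ (B.1.2/B.1.3)
}

__host__ __device__ void from_both() {
  dev_fn();            // accepted only while compiling the device side
  host_fn();           // accepted only while compiling the host side
}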
diff --git a/lib/Sema/SemaCXXScopeSpec.cpp b/lib/Sema/SemaCXXScopeSpec.cpp
index a70aca2ad86a..3e56e676a15c 100644
--- a/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/lib/Sema/SemaCXXScopeSpec.cpp
@@ -148,6 +148,9 @@ DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
case NestedNameSpecifier::Global:
return Context.getTranslationUnitDecl();
+
+ case NestedNameSpecifier::Super:
+ return NNS->getAsRecordDecl();
}
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
@@ -240,12 +243,43 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
return true;
}
-bool Sema::ActOnCXXGlobalScopeSpecifier(Scope *S, SourceLocation CCLoc,
+bool Sema::ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc,
CXXScopeSpec &SS) {
SS.MakeGlobal(Context, CCLoc);
return false;
}
+bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc,
+ CXXScopeSpec &SS) {
+ CXXRecordDecl *RD = nullptr;
+ for (Scope *S = getCurScope(); S; S = S->getParent()) {
+ if (S->isFunctionScope()) {
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(S->getEntity()))
+ RD = MD->getParent();
+ break;
+ }
+ if (S->isClassScope()) {
+ RD = cast<CXXRecordDecl>(S->getEntity());
+ break;
+ }
+ }
+
+ if (!RD) {
+ Diag(SuperLoc, diag::err_invalid_super_scope);
+ return true;
+ } else if (RD->isLambda()) {
+ Diag(SuperLoc, diag::err_super_in_lambda_unsupported);
+ return true;
+ } else if (RD->getNumBases() == 0) {
+ Diag(SuperLoc, diag::err_no_base_classes) << RD->getName();
+ return true;
+ }
+
+ SS.MakeSuper(Context, RD, SuperLoc, ColonColonLoc);
+ return false;
+}
+
/// \brief Determines whether the given declaration is a valid, acceptable
/// result for name lookup of a nested-name-specifier.
bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD) {
@@ -376,9 +410,6 @@ class NestedNameSpecifierValidatorCCC : public CorrectionCandidateCallback {
/// \brief Build a new nested-name-specifier for "identifier::", as described
/// by ActOnCXXNestedNameSpecifier.
///
-/// This routine differs only slightly from ActOnCXXNestedNameSpecifier, in
-/// that it contains an extra parameter \p ScopeLookupResult.
-///
/// \param S Scope in which the nested-name-specifier occurs.
/// \param Identifier Identifier in the sequence "identifier" "::".
/// \param IdentifierLoc Location of the \p Identifier.
@@ -541,12 +572,11 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
// We haven't found anything, and we're not recovering from a
// different kind of error, so look for typos.
DeclarationName Name = Found.getLookupName();
- NestedNameSpecifierValidatorCCC Validator(*this);
Found.clear();
- if (TypoCorrection Corrected =
- CorrectTypo(Found.getLookupNameInfo(), Found.getLookupKind(), S,
- &SS, Validator, CTK_ErrorRecovery, LookupCtx,
- EnteringContext)) {
+ if (TypoCorrection Corrected = CorrectTypo(
+ Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS,
+ llvm::make_unique<NestedNameSpecifierValidatorCCC>(*this),
+ CTK_ErrorRecovery, LookupCtx, EnteringContext)) {
if (LookupCtx) {
bool DroppedSpecifier =
Corrected.WillReplaceSpecifier() &&
@@ -612,10 +642,17 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
}
}
+ if (auto *TD = dyn_cast_or_null<TypedefNameDecl>(SD))
+ MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
+
// If we're just performing this lookup for error-recovery purposes,
// don't extend the nested-name-specifier. Just return now.
if (ErrorRecoveryLookup)
return false;
+
+ // The use of a nested name specifier may trigger deprecation warnings.
+ DiagnoseUseOfDecl(SD, CCLoc);
+
if (NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(SD)) {
SS.Extend(Context, Namespace, IdentifierLoc, CCLoc);
@@ -703,8 +740,13 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
if (getLangOpts().MSVCCompat) {
DeclContext *DC = LookupCtx ? LookupCtx : CurContext;
if (DC->isDependentContext() && DC->isFunctionOrMethod()) {
- SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
- return false;
+ CXXRecordDecl *ContainingClass = dyn_cast<CXXRecordDecl>(DC->getParent());
+ if (ContainingClass && ContainingClass->hasAnyDependentBases()) {
+ Diag(IdentifierLoc, diag::ext_undeclared_unqual_id_with_dependent_base)
+ << &Identifier << ContainingClass;
+ SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
+ return false;
+ }
}
}
@@ -945,6 +987,7 @@ bool Sema::ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
case NestedNameSpecifier::Identifier:
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::Super:
// These are never namespace scopes.
return true;
}
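ActOnSuperScopeSpecifier above gives '__super::' a meaning by resolving it to the current class, so that name lookup can search its bases. A small sketch under -fms-extensions (class names invented):

struct Base {
  void report();
};
struct Derived : Base {
  void report() {
    __super::report();  // resolved against Derived's base classes
  }
};
// Using '__super' outside a class, inside a lambda, or in a class without
// bases hits err_invalid_super_scope, err_super_in_lambda_unsupported, or
// err_no_base_classes respectively, matching the checks above.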
diff --git a/lib/Sema/SemaCast.cpp b/lib/Sema/SemaCast.cpp
index ae5436cf415c..a4c2d9b51c26 100644
--- a/lib/Sema/SemaCast.cpp
+++ b/lib/Sema/SemaCast.cpp
@@ -142,9 +142,6 @@ namespace {
};
}
-static bool CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
- bool CheckCVR, bool CheckObjCLifetime);
-
// The Try functions attempt a specific way of casting. If they succeed, they
// return TC_Success. If their way of casting is not appropriate for the given
// arguments, they return TC_NotApplicable and *may* set diag to a diagnostic
@@ -243,10 +240,8 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
QualType DestType = DestTInfo->getType();
// If the type is dependent, we won't do the semantic analysis now.
- // FIXME: should we check this in a more fine-grained manner?
- bool TypeDependent = DestType->isDependentType() ||
- Ex.get()->isTypeDependent() ||
- Ex.get()->isValueDependent();
+ bool TypeDependent =
+ DestType->isDependentType() || Ex.get()->isTypeDependent();
CastOperation Op(*this, DestType, E);
Op.OpRange = SourceRange(OpLoc, Parens.getEnd());
@@ -462,7 +457,10 @@ static bool UnwrapDissimilarPointerTypes(QualType& T1, QualType& T2) {
/// \param CheckObjCLifetime Whether to check Objective-C lifetime qualifiers.
static bool
CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
- bool CheckCVR, bool CheckObjCLifetime) {
+ bool CheckCVR, bool CheckObjCLifetime,
+ QualType *TheOffendingSrcType = nullptr,
+ QualType *TheOffendingDestType = nullptr,
+ Qualifiers *CastAwayQualifiers = nullptr) {
// If the only checking we care about is for Objective-C lifetime qualifiers,
// and we're not in ARC mode, there's nothing to check.
if (!CheckCVR && CheckObjCLifetime &&
@@ -487,6 +485,8 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
// Find the qualifiers. We only care about cvr-qualifiers for the
// purpose of this check, because other qualifiers (address spaces,
// Objective-C GC, etc.) are part of the type's identity.
+ QualType PrevUnwrappedSrcType = UnwrappedSrcType;
+ QualType PrevUnwrappedDestType = UnwrappedDestType;
while (UnwrapDissimilarPointerTypes(UnwrappedSrcType, UnwrappedDestType)) {
// Determine the relevant qualifiers at this level.
Qualifiers SrcQuals, DestQuals;
@@ -497,6 +497,13 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
if (CheckCVR) {
RetainedSrcQuals.setCVRQualifiers(SrcQuals.getCVRQualifiers());
RetainedDestQuals.setCVRQualifiers(DestQuals.getCVRQualifiers());
+
+ if (RetainedSrcQuals != RetainedDestQuals && TheOffendingSrcType &&
+ TheOffendingDestType && CastAwayQualifiers) {
+ *TheOffendingSrcType = PrevUnwrappedSrcType;
+ *TheOffendingDestType = PrevUnwrappedDestType;
+ *CastAwayQualifiers = RetainedSrcQuals - RetainedDestQuals;
+ }
}
if (CheckObjCLifetime &&
@@ -505,6 +512,9 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
cv1.push_back(RetainedSrcQuals);
cv2.push_back(RetainedDestQuals);
+
+ PrevUnwrappedSrcType = UnwrappedSrcType;
+ PrevUnwrappedDestType = UnwrappedDestType;
}
if (cv1.empty())
return false;
@@ -2200,8 +2210,8 @@ void CastOperation::CheckCStyleCast() {
// address space B is illegal.
if (Self.getLangOpts().OpenCL && DestType->isPointerType() &&
SrcType->isPointerType()) {
- if (DestType->getPointeeType().getAddressSpace() !=
- SrcType->getPointeeType().getAddressSpace()) {
+ const PointerType *DestPtr = DestType->getAs<PointerType>();
+ if (!DestPtr->isAddressSpaceOverlapping(*SrcType->getAs<PointerType>())) {
Self.Diag(OpRange.getBegin(),
diag::err_typecheck_incompatible_address_space)
<< SrcType << DestType << Sema::AA_Casting
@@ -2371,6 +2381,30 @@ void CastOperation::CheckCStyleCast() {
if (Kind == CK_BitCast)
checkCastAlign();
+
+ // -Wcast-qual
+ QualType TheOffendingSrcType, TheOffendingDestType;
+ Qualifiers CastAwayQualifiers;
+ if (SrcType->isAnyPointerType() && DestType->isAnyPointerType() &&
+ CastsAwayConstness(Self, SrcType, DestType, true, false,
+ &TheOffendingSrcType, &TheOffendingDestType,
+ &CastAwayQualifiers)) {
+ int qualifiers = -1;
+ if (CastAwayQualifiers.hasConst() && CastAwayQualifiers.hasVolatile()) {
+ qualifiers = 0;
+ } else if (CastAwayQualifiers.hasConst()) {
+ qualifiers = 1;
+ } else if (CastAwayQualifiers.hasVolatile()) {
+ qualifiers = 2;
+ }
+ // This is a variant of int **x; const int **y = (const int **)x;
+ if (qualifiers == -1)
+ Self.Diag(SrcExpr.get()->getLocStart(), diag::warn_cast_qual2) <<
+ SrcType << DestType;
+ else
+ Self.Diag(SrcExpr.get()->getLocStart(), diag::warn_cast_qual) <<
+ TheOffendingSrcType << TheOffendingDestType << qualifiers;
+ }
}
ExprResult Sema::BuildCStyleCastExpr(SourceLocation LPLoc,
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index 66be962fcf33..9fc2bec23b1b 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -111,8 +111,100 @@ static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
return false;
}
+static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
+ CallExpr *TheCall, unsigned SizeIdx,
+ unsigned DstSizeIdx) {
+ if (TheCall->getNumArgs() <= SizeIdx ||
+ TheCall->getNumArgs() <= DstSizeIdx)
+ return;
+
+ const Expr *SizeArg = TheCall->getArg(SizeIdx);
+ const Expr *DstSizeArg = TheCall->getArg(DstSizeIdx);
+
+ llvm::APSInt Size, DstSize;
+
+ // find out if both sizes are known at compile time
+ if (!SizeArg->EvaluateAsInt(Size, S.Context) ||
+ !DstSizeArg->EvaluateAsInt(DstSize, S.Context))
+ return;
+
+ if (Size.ule(DstSize))
+ return;
+
+ // confirmed overflow so generate the diagnostic.
+ IdentifierInfo *FnName = FDecl->getIdentifier();
+ SourceLocation SL = TheCall->getLocStart();
+ SourceRange SR = TheCall->getSourceRange();
+
+ S.Diag(SL, diag::warn_memcpy_chk_overflow) << SR << FnName;
+}
+
+static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
+ if (checkArgCount(S, BuiltinCall, 2))
+ return true;
+
+ SourceLocation BuiltinLoc = BuiltinCall->getLocStart();
+ Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
+ Expr *Call = BuiltinCall->getArg(0);
+ Expr *Chain = BuiltinCall->getArg(1);
+
+ if (Call->getStmtClass() != Stmt::CallExprClass) {
+ S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
+ << Call->getSourceRange();
+ return true;
+ }
+
+ auto CE = cast<CallExpr>(Call);
+ if (CE->getCallee()->getType()->isBlockPointerType()) {
+ S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
+ << Call->getSourceRange();
+ return true;
+ }
+
+ const Decl *TargetDecl = CE->getCalleeDecl();
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
+ if (FD->getBuiltinID()) {
+ S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
+ << Call->getSourceRange();
+ return true;
+ }
+
+ if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
+ S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
+ << Call->getSourceRange();
+ return true;
+ }
+
+ ExprResult ChainResult = S.UsualUnaryConversions(Chain);
+ if (ChainResult.isInvalid())
+ return true;
+ if (!ChainResult.get()->getType()->isPointerType()) {
+ S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
+ << Chain->getSourceRange();
+ return true;
+ }
+
+ QualType ReturnTy = CE->getCallReturnType();
+ QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
+ QualType BuiltinTy = S.Context.getFunctionType(
+ ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
+ QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
+
+ Builtin =
+ S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();
+
+ BuiltinCall->setType(CE->getType());
+ BuiltinCall->setValueKind(CE->getValueKind());
+ BuiltinCall->setObjectKind(CE->getObjectKind());
+ BuiltinCall->setCallee(Builtin);
+ BuiltinCall->setArg(1, ChainResult.get());
+
+ return false;
+}
+
ExprResult
-Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
+ CallExpr *TheCall) {
ExprResult TheCallResult(TheCall);
// Find out if any arguments are required to be integer constant expressions.
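For illustration: __builtin_call_with_static_chain takes an ordinary call expression plus a pointer to use as the static chain, and the checks above reject block calls, builtin calls, pseudo-destructor calls, and non-pointer chains. A minimal sketch; the names are hypothetical, and what the chain value must point at is a caller/callee ABI contract that this check does not model:

    int target(int x);

    int invoke(int x, void *chain) {
      // first argument: a plain direct call; second argument: run through the
      // usual unary conversions and required to have pointer type
      return __builtin_call_with_static_chain(target(x), chain);
    }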
@@ -189,9 +281,14 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return ExprError();
break;
case Builtin::BI__assume:
+ case Builtin::BI__builtin_assume:
if (SemaBuiltinAssume(TheCall))
return ExprError();
break;
+ case Builtin::BI__builtin_assume_aligned:
+ if (SemaBuiltinAssumeAligned(TheCall))
+ return ExprError();
+ break;
case Builtin::BI__builtin_object_size:
if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
return ExprError();
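For illustration: __builtin_assume now goes through the same check as __assume, which warns when the assumed expression has side effects (they would be discarded). A short sketch with hypothetical names:

    void consume(int n) {
      __builtin_assume(n > 0);       // fine: no side effects
      // __builtin_assume(n++ > 0);  // would warn: side effects discarded, assumption ignored
    }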
@@ -239,6 +336,12 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Builtin::BI__sync_fetch_and_xor_4:
case Builtin::BI__sync_fetch_and_xor_8:
case Builtin::BI__sync_fetch_and_xor_16:
+ case Builtin::BI__sync_fetch_and_nand:
+ case Builtin::BI__sync_fetch_and_nand_1:
+ case Builtin::BI__sync_fetch_and_nand_2:
+ case Builtin::BI__sync_fetch_and_nand_4:
+ case Builtin::BI__sync_fetch_and_nand_8:
+ case Builtin::BI__sync_fetch_and_nand_16:
case Builtin::BI__sync_add_and_fetch:
case Builtin::BI__sync_add_and_fetch_1:
case Builtin::BI__sync_add_and_fetch_2:
@@ -269,6 +372,12 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Builtin::BI__sync_xor_and_fetch_4:
case Builtin::BI__sync_xor_and_fetch_8:
case Builtin::BI__sync_xor_and_fetch_16:
+ case Builtin::BI__sync_nand_and_fetch:
+ case Builtin::BI__sync_nand_and_fetch_1:
+ case Builtin::BI__sync_nand_and_fetch_2:
+ case Builtin::BI__sync_nand_and_fetch_4:
+ case Builtin::BI__sync_nand_and_fetch_8:
+ case Builtin::BI__sync_nand_and_fetch_16:
case Builtin::BI__sync_val_compare_and_swap:
case Builtin::BI__sync_val_compare_and_swap_1:
case Builtin::BI__sync_val_compare_and_swap_2:
@@ -327,6 +436,31 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
// so ensure that they are declared.
DeclareGlobalNewDelete();
break;
+
+ // check secure string manipulation functions where overflows
+ // are detectable at compile time
+ case Builtin::BI__builtin___memcpy_chk:
+ case Builtin::BI__builtin___memmove_chk:
+ case Builtin::BI__builtin___memset_chk:
+ case Builtin::BI__builtin___strlcat_chk:
+ case Builtin::BI__builtin___strlcpy_chk:
+ case Builtin::BI__builtin___strncat_chk:
+ case Builtin::BI__builtin___strncpy_chk:
+ case Builtin::BI__builtin___stpncpy_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3);
+ break;
+ case Builtin::BI__builtin___memccpy_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 3, 4);
+ break;
+ case Builtin::BI__builtin___snprintf_chk:
+ case Builtin::BI__builtin___vsnprintf_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3);
+ break;
+
+ case Builtin::BI__builtin_call_with_static_chain:
+ if (SemaBuiltinCallWithStaticChain(*this, TheCall))
+ return ExprError();
+ break;
}
// Since the target specific builtins for each arch overlap, only check those
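For illustration: SemaBuiltinMemChkCall only fires when both the copy size and the destination size fold to integer constants, which is the usual shape of the _chk fortification wrappers. A sketch with hypothetical names:

    void fill(const char *src) {
      char dst[4];
      // 16 and __builtin_object_size(dst, 0) == 4 are both compile-time constants,
      // so the overflow is reported up front by the new check
      __builtin___memcpy_chk(dst, src, 16, __builtin_object_size(dst, 0));
    }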
@@ -342,8 +476,6 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- case llvm::Triple::arm64:
- case llvm::Triple::arm64_be:
if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
return ExprError();
break;
@@ -468,8 +600,7 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
QualType RHSTy = RHS.get()->getType();
llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
- bool IsPolyUnsigned =
- Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::arm64;
+ bool IsPolyUnsigned = Arch == llvm::Triple::aarch64;
bool IsInt64Long =
Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
QualType EltTy =
@@ -627,6 +758,11 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
}
+ if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
+ }
+
if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
return true;
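For illustration: per this hunk, the 32-bit ARM __builtin_arm_prefetch requires its second and third arguments to be integer constants in [0, 1]. A sketch that compiles only when targeting ARM; the interpretation of the two flags (read/write and data/instruction selection) is the target's convention, not something this range check encodes:

    void warm(const void *p) {
      __builtin_arm_prefetch(p, 0, 1);  // both selector arguments are constants in range
    }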
@@ -641,7 +777,8 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case ARM::BI__builtin_arm_vcvtr_d: i = 1; u = 1; break;
case ARM::BI__builtin_arm_dmb:
case ARM::BI__builtin_arm_dsb:
- case ARM::BI__builtin_arm_isb: l = 0; u = 15; break;
+ case ARM::BI__builtin_arm_isb:
+ case ARM::BI__builtin_arm_dbg: l = 0; u = 15; break;
}
// FIXME: VFP Intrinsics should error if VFP not present.
@@ -659,6 +796,13 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
}
+ if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
+ SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
+ }
+
if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
return true;
@@ -672,7 +816,6 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
}
- // FIXME: VFP Intrinsics should error if VFP not present.
return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
@@ -693,12 +836,16 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
}
bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ unsigned i = 0, l = 0, u = 0;
switch (BuiltinID) {
- case X86::BI_mm_prefetch:
- // This is declared to take (const char*, int)
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
+ default: return false;
+ case X86::BI_mm_prefetch: i = 1; l = 0; u = 3; break;
+ case X86::BI__builtin_ia32_cmpps:
+ case X86::BI__builtin_ia32_cmpss:
+ case X86::BI__builtin_ia32_cmppd:
+ case X86::BI__builtin_ia32_cmpsd: i = 2; l = 0; u = 31; break;
}
- return false;
+ return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
/// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
@@ -753,14 +900,79 @@ static void CheckNonNullArgument(Sema &S,
S.Diag(CallSiteLoc, diag::warn_null_arg) << ArgExpr->getSourceRange();
}
+bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
+ FormatStringInfo FSI;
+ if ((GetFormatStringType(Format) == FST_NSString) &&
+ getFormatStringInfo(Format, false, &FSI)) {
+ Idx = FSI.FormatIdx;
+ return true;
+ }
+ return false;
+}
+/// \brief Diagnose use of a %s directive in an NSString that is being passed
+/// as a format string to a formatting method.
+static void
+DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
+ const NamedDecl *FDecl,
+ Expr **Args,
+ unsigned NumArgs) {
+ unsigned Idx = 0;
+ bool Format = false;
+ ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
+ if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
+ Idx = 2;
+ Format = true;
+ }
+ else
+ for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
+ if (S.GetFormatNSStringIdx(I, Idx)) {
+ Format = true;
+ break;
+ }
+ }
+ if (!Format || NumArgs <= Idx)
+ return;
+ const Expr *FormatExpr = Args[Idx];
+ if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
+ FormatExpr = CSCE->getSubExpr();
+ const StringLiteral *FormatString;
+ if (const ObjCStringLiteral *OSL =
+ dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
+ FormatString = OSL->getString();
+ else
+ FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
+ if (!FormatString)
+ return;
+ if (S.FormatStringHasSArg(FormatString)) {
+ S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
+ << "%s" << 1 << 1;
+ S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
+ << FDecl->getDeclName();
+ }
+}
+
static void CheckNonNullArguments(Sema &S,
const NamedDecl *FDecl,
- const Expr * const *ExprArgs,
+ ArrayRef<const Expr *> Args,
SourceLocation CallSiteLoc) {
// Check the attributes attached to the method/function itself.
+ llvm::SmallBitVector NonNullArgs;
for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
- for (const auto &Val : NonNull->args())
- CheckNonNullArgument(S, ExprArgs[Val], CallSiteLoc);
+ if (!NonNull->args_size()) {
+ // Easy case: all pointer arguments are nonnull.
+ for (const auto *Arg : Args)
+ if (S.isValidPointerAttrType(Arg->getType()))
+ CheckNonNullArgument(S, Arg, CallSiteLoc);
+ return;
+ }
+
+ for (unsigned Val : NonNull->args()) {
+ if (Val >= Args.size())
+ continue;
+ if (NonNullArgs.empty())
+ NonNullArgs.resize(Args.size());
+ NonNullArgs.set(Val);
+ }
}
// Check the attributes on the parameters.
@@ -770,13 +982,19 @@ static void CheckNonNullArguments(Sema &S,
else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(FDecl))
parms = MD->parameters();
- unsigned argIndex = 0;
+ unsigned ArgIndex = 0;
for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
- I != E; ++I, ++argIndex) {
+ I != E; ++I, ++ArgIndex) {
const ParmVarDecl *PVD = *I;
- if (PVD->hasAttr<NonNullAttr>())
- CheckNonNullArgument(S, ExprArgs[argIndex], CallSiteLoc);
+ if (PVD->hasAttr<NonNullAttr>() ||
+ (ArgIndex < NonNullArgs.size() && NonNullArgs[ArgIndex]))
+ CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
}
+
+ // In case this is a variadic call, check any remaining arguments.
+ for (/**/; ArgIndex < NonNullArgs.size(); ++ArgIndex)
+ if (NonNullArgs[ArgIndex])
+ CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
}
/// Handles the checks for format strings, non-POD arguments to vararg
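For illustration: when a nonnull attribute carries no index list, the rewritten CheckNonNullArguments checks every pointer argument, and indexed attributes are accumulated into a bit vector so that variadic arguments past the declared parameters are covered too. A sketch with hypothetical names, assuming -Wnonnull is enabled:

    __attribute__((nonnull)) void connect(const char *host, int port, int *err);

    void demo() {
      connect(nullptr, 80, nullptr);  // with no index list, both pointer arguments are flagged
    }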
@@ -814,7 +1032,7 @@ void Sema::checkCall(NamedDecl *FDecl, ArrayRef<const Expr *> Args,
}
if (FDecl) {
- CheckNonNullArguments(*this, FDecl, Args.data(), Loc);
+ CheckNonNullArguments(*this, FDecl, Args, Loc);
// Type safety checking.
for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
@@ -854,7 +1072,7 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
++Args;
--NumArgs;
}
- checkCall(FDecl, llvm::makeArrayRef<const Expr *>(Args, NumArgs), NumParams,
+ checkCall(FDecl, llvm::makeArrayRef(Args, NumArgs), NumParams,
IsMemberFunction, TheCall->getRParenLoc(),
TheCall->getCallee()->getSourceRange(), CallType);
@@ -865,6 +1083,8 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
return false;
CheckAbsoluteValueFunction(TheCall, FDecl, FnInfo);
+ if (getLangOpts().ObjC1)
+ DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
unsigned CMId = FDecl->getMemoryFunctionKind();
if (CMId == 0)
@@ -913,8 +1133,8 @@ bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
}
unsigned NumParams = Proto ? Proto->getNumParams() : 0;
- checkCall(NDecl, llvm::makeArrayRef<const Expr *>(TheCall->getArgs(),
- TheCall->getNumArgs()),
+ checkCall(NDecl, llvm::makeArrayRef(TheCall->getArgs(),
+ TheCall->getNumArgs()),
NumParams, /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
TheCall->getCallee()->getSourceRange(), CallType);
@@ -929,8 +1149,7 @@ bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
unsigned NumParams = Proto ? Proto->getNumParams() : 0;
checkCall(/*FDecl=*/nullptr,
- llvm::makeArrayRef<const Expr *>(TheCall->getArgs(),
- TheCall->getNumArgs()),
+ llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
NumParams, /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
TheCall->getCallee()->getSourceRange(), CallType);
@@ -1383,12 +1602,14 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
BUILTIN_ROW(__sync_fetch_and_or),
BUILTIN_ROW(__sync_fetch_and_and),
BUILTIN_ROW(__sync_fetch_and_xor),
+ BUILTIN_ROW(__sync_fetch_and_nand),
BUILTIN_ROW(__sync_add_and_fetch),
BUILTIN_ROW(__sync_sub_and_fetch),
BUILTIN_ROW(__sync_and_and_fetch),
BUILTIN_ROW(__sync_or_and_fetch),
BUILTIN_ROW(__sync_xor_and_fetch),
+ BUILTIN_ROW(__sync_nand_and_fetch),
BUILTIN_ROW(__sync_val_compare_and_swap),
BUILTIN_ROW(__sync_bool_compare_and_swap),
@@ -1418,6 +1639,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// as the number of fixed args.
unsigned BuiltinID = FDecl->getBuiltinID();
unsigned BuiltinIndex, NumFixed = 1;
+ bool WarnAboutSemanticsChange = false;
switch (BuiltinID) {
default: llvm_unreachable("Unknown overloaded atomic builtin!");
case Builtin::BI__sync_fetch_and_add:
@@ -1465,13 +1687,23 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
BuiltinIndex = 4;
break;
+ case Builtin::BI__sync_fetch_and_nand:
+ case Builtin::BI__sync_fetch_and_nand_1:
+ case Builtin::BI__sync_fetch_and_nand_2:
+ case Builtin::BI__sync_fetch_and_nand_4:
+ case Builtin::BI__sync_fetch_and_nand_8:
+ case Builtin::BI__sync_fetch_and_nand_16:
+ BuiltinIndex = 5;
+ WarnAboutSemanticsChange = true;
+ break;
+
case Builtin::BI__sync_add_and_fetch:
case Builtin::BI__sync_add_and_fetch_1:
case Builtin::BI__sync_add_and_fetch_2:
case Builtin::BI__sync_add_and_fetch_4:
case Builtin::BI__sync_add_and_fetch_8:
case Builtin::BI__sync_add_and_fetch_16:
- BuiltinIndex = 5;
+ BuiltinIndex = 6;
break;
case Builtin::BI__sync_sub_and_fetch:
@@ -1480,7 +1712,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
case Builtin::BI__sync_sub_and_fetch_4:
case Builtin::BI__sync_sub_and_fetch_8:
case Builtin::BI__sync_sub_and_fetch_16:
- BuiltinIndex = 6;
+ BuiltinIndex = 7;
break;
case Builtin::BI__sync_and_and_fetch:
@@ -1489,7 +1721,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
case Builtin::BI__sync_and_and_fetch_4:
case Builtin::BI__sync_and_and_fetch_8:
case Builtin::BI__sync_and_and_fetch_16:
- BuiltinIndex = 7;
+ BuiltinIndex = 8;
break;
case Builtin::BI__sync_or_and_fetch:
@@ -1498,7 +1730,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
case Builtin::BI__sync_or_and_fetch_4:
case Builtin::BI__sync_or_and_fetch_8:
case Builtin::BI__sync_or_and_fetch_16:
- BuiltinIndex = 8;
+ BuiltinIndex = 9;
break;
case Builtin::BI__sync_xor_and_fetch:
@@ -1507,7 +1739,17 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
case Builtin::BI__sync_xor_and_fetch_4:
case Builtin::BI__sync_xor_and_fetch_8:
case Builtin::BI__sync_xor_and_fetch_16:
- BuiltinIndex = 9;
+ BuiltinIndex = 10;
+ break;
+
+ case Builtin::BI__sync_nand_and_fetch:
+ case Builtin::BI__sync_nand_and_fetch_1:
+ case Builtin::BI__sync_nand_and_fetch_2:
+ case Builtin::BI__sync_nand_and_fetch_4:
+ case Builtin::BI__sync_nand_and_fetch_8:
+ case Builtin::BI__sync_nand_and_fetch_16:
+ BuiltinIndex = 11;
+ WarnAboutSemanticsChange = true;
break;
case Builtin::BI__sync_val_compare_and_swap:
@@ -1516,7 +1758,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
case Builtin::BI__sync_val_compare_and_swap_4:
case Builtin::BI__sync_val_compare_and_swap_8:
case Builtin::BI__sync_val_compare_and_swap_16:
- BuiltinIndex = 10;
+ BuiltinIndex = 12;
NumFixed = 2;
break;
@@ -1526,7 +1768,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
case Builtin::BI__sync_bool_compare_and_swap_4:
case Builtin::BI__sync_bool_compare_and_swap_8:
case Builtin::BI__sync_bool_compare_and_swap_16:
- BuiltinIndex = 11;
+ BuiltinIndex = 13;
NumFixed = 2;
ResultType = Context.BoolTy;
break;
@@ -1537,7 +1779,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
case Builtin::BI__sync_lock_test_and_set_4:
case Builtin::BI__sync_lock_test_and_set_8:
case Builtin::BI__sync_lock_test_and_set_16:
- BuiltinIndex = 12;
+ BuiltinIndex = 14;
break;
case Builtin::BI__sync_lock_release:
@@ -1546,7 +1788,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
case Builtin::BI__sync_lock_release_4:
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16:
- BuiltinIndex = 13;
+ BuiltinIndex = 15;
NumFixed = 0;
ResultType = Context.VoidTy;
break;
@@ -1557,7 +1799,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
case Builtin::BI__sync_swap_4:
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
- BuiltinIndex = 14;
+ BuiltinIndex = 16;
break;
}
@@ -1570,6 +1812,11 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
return ExprError();
}
+ if (WarnAboutSemanticsChange) {
+ Diag(TheCall->getLocEnd(), diag::warn_sync_fetch_and_nand_semantics_change)
+ << TheCall->getCallee()->getSourceRange();
+ }
+
// Get the decl for the concrete builtin from this, we can tell what the
// concrete integer type we should convert to is.
unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
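For illustration: the new nand rows come with warn_sync_fetch_and_nand_semantics_change because GCC redefined these builtins in GCC 4.4 (the stored value became ~(*p & v) rather than ~*p & v, per the GCC release notes rather than anything in this patch), and clang follows the newer definition. A sketch with hypothetical names:

    unsigned clear_bits(unsigned *p, unsigned mask) {
      // warns about the semantics change; the value stored back is ~(*p & mask)
      return __sync_fetch_and_nand(p, mask);  // returns the previous *p
    }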
@@ -2031,7 +2278,46 @@ bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
if (Arg->HasSideEffects(Context))
return Diag(Arg->getLocStart(), diag::warn_assume_side_effects)
- << Arg->getSourceRange();
+ << Arg->getSourceRange()
+ << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
+
+ return false;
+}
+
+/// Handle __builtin_assume_aligned. This is declared
+/// as (const void*, size_t, ...) and can take one optional constant int arg.
+bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
+ unsigned NumArgs = TheCall->getNumArgs();
+
+ if (NumArgs > 3)
+ return Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_many_args_at_most)
+ << 0 /*function call*/ << 3 << NumArgs
+ << TheCall->getSourceRange();
+
+ // The alignment must be a constant integer.
+ Expr *Arg = TheCall->getArg(1);
+
+ // We can't check the value of a dependent argument.
+ if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
+ llvm::APSInt Result;
+ if (SemaBuiltinConstantArg(TheCall, 1, Result))
+ return true;
+
+ if (!Result.isPowerOf2())
+ return Diag(TheCall->getLocStart(),
+ diag::err_alignment_not_power_of_two)
+ << Arg->getSourceRange();
+ }
+
+ if (NumArgs > 2) {
+ ExprResult Arg(TheCall->getArg(2));
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ Context.getSizeType(), false);
+ Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (Arg.isInvalid()) return true;
+ TheCall->setArg(2, Arg.get());
+ }
return false;
}
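For illustration: SemaBuiltinAssumeAligned enforces at most three arguments, a constant power-of-two alignment, and size_t conversion of the optional misalignment offset. A sketch with hypothetical names:

    int sum4(const int *p) {
      const int *q = (const int *)__builtin_assume_aligned(p, 16);  // 16 is a constant power of two
      return q[0] + q[1] + q[2] + q[3];
    }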
@@ -3178,6 +3464,61 @@ static bool requiresParensToAddCast(const Expr *E) {
}
}
+static std::pair<QualType, StringRef>
+shouldNotPrintDirectly(const ASTContext &Context,
+ QualType IntendedTy,
+ const Expr *E) {
+ // Use a 'while' to peel off layers of typedefs.
+ QualType TyTy = IntendedTy;
+ while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
+ StringRef Name = UserTy->getDecl()->getName();
+ QualType CastTy = llvm::StringSwitch<QualType>(Name)
+ .Case("NSInteger", Context.LongTy)
+ .Case("NSUInteger", Context.UnsignedLongTy)
+ .Case("SInt32", Context.IntTy)
+ .Case("UInt32", Context.UnsignedIntTy)
+ .Default(QualType());
+
+ if (!CastTy.isNull())
+ return std::make_pair(CastTy, Name);
+
+ TyTy = UserTy->desugar();
+ }
+
+ // Strip parens if necessary.
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ return shouldNotPrintDirectly(Context,
+ PE->getSubExpr()->getType(),
+ PE->getSubExpr());
+
+ // If this is a conditional expression, its result type is constructed via the
+ // usual arithmetic conversions, so any typedef sugar may have been stripped.
+ // Recurse into the operands to check for NSInteger & co. usage there.
+ if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ QualType TrueTy, FalseTy;
+ StringRef TrueName, FalseName;
+
+ std::tie(TrueTy, TrueName) =
+ shouldNotPrintDirectly(Context,
+ CO->getTrueExpr()->getType(),
+ CO->getTrueExpr());
+ std::tie(FalseTy, FalseName) =
+ shouldNotPrintDirectly(Context,
+ CO->getFalseExpr()->getType(),
+ CO->getFalseExpr());
+
+ if (TrueTy == FalseTy)
+ return std::make_pair(TrueTy, TrueName);
+ else if (TrueTy.isNull())
+ return std::make_pair(FalseTy, FalseName);
+ else if (FalseTy.isNull())
+ return std::make_pair(TrueTy, TrueName);
+ }
+
+ return std::make_pair(QualType(), StringRef());
+}
+
bool
CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
const char *StartSpecifier,
@@ -3269,25 +3610,13 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// Special-case some of Darwin's platform-independence types by suggesting
// casts to primitive types that are known to be large enough.
- bool ShouldNotPrintDirectly = false;
+ bool ShouldNotPrintDirectly = false; StringRef CastTyName;
if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
- // Use a 'while' to peel off layers of typedefs.
- QualType TyTy = IntendedTy;
- while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
- StringRef Name = UserTy->getDecl()->getName();
- QualType CastTy = llvm::StringSwitch<QualType>(Name)
- .Case("NSInteger", S.Context.LongTy)
- .Case("NSUInteger", S.Context.UnsignedLongTy)
- .Case("SInt32", S.Context.IntTy)
- .Case("UInt32", S.Context.UnsignedIntTy)
- .Default(QualType());
-
- if (!CastTy.isNull()) {
- ShouldNotPrintDirectly = true;
- IntendedTy = CastTy;
- break;
- }
- TyTy = UserTy->desugar();
+ QualType CastTy;
+ std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
+ if (!CastTy.isNull()) {
+ IntendedTy = CastTy;
+ ShouldNotPrintDirectly = true;
}
}
@@ -3304,7 +3633,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
- if (IntendedTy == ExprTy) {
+ if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
// In this case, the specifier is wrong and should be changed to match
// the argument.
EmitFormatDiagnostic(
@@ -3358,8 +3687,11 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// The expression has a type that should not be printed directly.
// We extract the name from the typedef because we don't want to show
// the underlying type in the diagnostic.
- StringRef Name = cast<TypedefType>(ExprTy)->getDecl()->getName();
-
+ StringRef Name;
+ if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy))
+ Name = TypedefTy->getDecl()->getName();
+ else
+ Name = CastTyName;
EmitFormatDiagnostic(S.PDiag(diag::warn_format_argument_needs_cast)
<< Name << IntendedTy << IsEnum
<< E->getSourceRange(),
@@ -3395,6 +3727,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
break;
case Sema::VAK_Undefined:
+ case Sema::VAK_MSVCUndefined:
EmitFormatDiagnostic(
S.PDiag(diag::warn_non_pod_vararg_with_format_string)
<< S.getLangOpts().CPlusPlus11
@@ -3672,6 +4005,20 @@ void Sema::CheckFormatString(const StringLiteral *FExpr,
} // TODO: handle other formats
}
+bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
+ // Str - The format string. NOTE: this is NOT null-terminated!
+ StringRef StrRef = FExpr->getString();
+ const char *Str = StrRef.data();
+ // Account for cases where the string literal is truncated in a declaration.
+ const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
+ assert(T && "String literal not of constant array type!");
+ size_t TypeSize = T->getSize().getZExtValue();
+ size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
+ return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
+ getLangOpts(),
+ Context.getTargetInfo());
+}
+
//===--- CHECK: Warn on use of wrong absolute value function. -------------===//
// Returns the related absolute value function that is larger, or 0 if one
@@ -4346,7 +4693,8 @@ void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName) {
// Don't crash if the user has the wrong number of arguments
- if (Call->getNumArgs() != 3)
+ unsigned NumArgs = Call->getNumArgs();
+ if ((NumArgs != 3) && (NumArgs != 4))
return;
const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
@@ -4628,7 +4976,7 @@ static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars,
DeclRefExpr *DR = cast<DeclRefExpr>(E);
// If we leave the immediate function, the lifetime isn't about to end.
- if (DR->refersToEnclosingLocal())
+ if (DR->refersToEnclosingVariableOrCapture())
return nullptr;
if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
@@ -4795,7 +5143,7 @@ do {
DeclRefExpr *DR = cast<DeclRefExpr>(E);
// If we leave the immediate function, the lifetime isn't about to end.
- if (DR->refersToEnclosingLocal())
+ if (DR->refersToEnclosingVariableOrCapture())
return nullptr;
if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl())) {
@@ -5660,8 +6008,13 @@ static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
// The type the comparison is being performed in.
QualType T = E->getLHS()->getType();
- assert(S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())
- && "comparison with mismatched types");
+
+ // Only analyze comparison operators where both sides have been converted to
+ // the same type.
+ if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
+ return AnalyzeImpConvsInComparison(S, E);
+
+ // Don't analyze value-dependent comparisons directly.
if (E->isValueDependent())
return AnalyzeImpConvsInComparison(S, E);
@@ -5932,6 +6285,41 @@ void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
}
}
+static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
+ SourceLocation CC) {
+ if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
+ E->getExprLoc()))
+ return;
+
+ // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
+ const Expr::NullPointerConstantKind NullKind =
+ E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull);
+ if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr)
+ return;
+
+ // Return if target type is a safe conversion.
+ if (T->isAnyPointerType() || T->isBlockPointerType() ||
+ T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
+ return;
+
+ SourceLocation Loc = E->getSourceRange().getBegin();
+
+ // __null is usually wrapped in a macro. Go up a macro if that is the case.
+ if (NullKind == Expr::NPCK_GNUNull) {
+ if (Loc.isMacroID())
+ Loc = S.SourceMgr.getImmediateExpansionRange(Loc).first;
+ }
+
+ // Only warn if the null and context location are in the same macro expansion.
+ if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
+ return;
+
+ S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
+ << (NullKind == Expr::NPCK_CXX11_nullptr) << T << clang::SourceRange(CC)
+ << FixItHint::CreateReplacement(Loc,
+ S.getFixItZeroLiteralForType(T, Loc));
+}
+
void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
SourceLocation CC, bool *ICContext = nullptr) {
if (E->isTypeDependent() || E->isValueDependent()) return;
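For illustration: the refactored DiagnoseNullConversion keeps the old behavior for NULL and also covers nullptr where it converts to a non-pointer scalar. A sketch of the common case, assuming NULL expands to __null as it does with the usual C++ headers; names are hypothetical:

    #include <cstddef>

    void misuse() {
      int flags = NULL;  // warn_impcast_null_pointer_to_integer, with a fix-it suggesting 0
      (void)flags;
    }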
@@ -6074,19 +6462,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return;
}
- if ((E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)
- == Expr::NPCK_GNUNull) && !Target->isAnyPointerType()
- && !Target->isBlockPointerType() && !Target->isMemberPointerType()
- && Target->isScalarType() && !Target->isNullPtrType()) {
- SourceLocation Loc = E->getSourceRange().getBegin();
- if (Loc.isMacroID())
- Loc = S.SourceMgr.getImmediateExpansionRange(Loc).first;
- if (!Loc.isMacroID() || CC.isMacroID())
- S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
- << T << clang::SourceRange(CC)
- << FixItHint::CreateReplacement(Loc,
- S.getFixItZeroLiteralForType(T, Loc));
- }
+ DiagnoseNullConversion(S, E, T, CC);
if (!Source->isIntegerType() || !Target->isIntegerType())
return;
@@ -6196,7 +6572,7 @@ void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
SourceLocation CC, QualType T) {
- AnalyzeImplicitConversions(S, E->getCond(), CC);
+ AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());
bool Suspicious = false;
CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
@@ -6222,6 +6598,14 @@ void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
E->getType(), CC, &Suspicious);
}
+/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
+/// Input argument E is a logical expression.
+static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
+ if (S.getLangOpts().Bool)
+ return;
+ CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
+}
+
/// AnalyzeImplicitConversions - Find and report any interesting
/// implicit conversions in the given expression. There are a couple
/// of competing diagnostics here, -Wconversion and -Wsign-compare.
@@ -6301,6 +6685,20 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
continue;
AnalyzeImplicitConversions(S, ChildExpr, CC);
}
+
+ if (BO && BO->isLogicalOp()) {
+ Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
+ if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
+ ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
+
+ SubExpr = BO->getRHS()->IgnoreParenImpCasts();
+ if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
+ ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
+ }
+
+ if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E))
+ if (U->getOpcode() == UO_LNot)
+ ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
}
} // end anonymous namespace
@@ -6343,6 +6741,22 @@ static bool CheckForReference(Sema &SemaRef, const Expr *E,
return true;
}
+// Returns true if the SourceLocation is expanded from any macro body.
+// Returns false if the SourceLocation is invalid, is not in a macro
+// expansion, or is expanded from a top-level macro argument.
+static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
+ if (Loc.isInvalid())
+ return false;
+
+ while (Loc.isMacroID()) {
+ if (SM.isMacroBodyExpansion(Loc))
+ return true;
+ Loc = SM.getImmediateMacroCallerLoc(Loc);
+ }
+
+ return false;
+}
+
/// \brief Diagnose pointers that are always non-null.
/// \param E the expression containing the pointer
/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
@@ -6356,8 +6770,12 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
return;
// Don't warn inside macros.
- if (E->getExprLoc().isMacroID())
+ if (E->getExprLoc().isMacroID()) {
+ const SourceManager &SM = getSourceManager();
+ if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
+ IsInAnyMacroBody(SM, Range.getBegin()))
return;
+ }
E = E->IgnoreImpCasts();
const bool IsCompare = NullKind != Expr::NPCK_NotNull;
@@ -6400,7 +6818,40 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
// Weak Decls can be null.
if (!D || D->isWeak())
return;
-
+
+ // Check for parameter decl with nonnull attribute
+ if (const ParmVarDecl* PV = dyn_cast<ParmVarDecl>(D)) {
+ if (getCurFunction() && !getCurFunction()->ModifiedNonNullParams.count(PV))
+ if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
+ unsigned NumArgs = FD->getNumParams();
+ llvm::SmallBitVector AttrNonNull(NumArgs);
+ for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
+ if (!NonNull->args_size()) {
+ AttrNonNull.set(0, NumArgs);
+ break;
+ }
+ for (unsigned Val : NonNull->args()) {
+ if (Val >= NumArgs)
+ continue;
+ AttrNonNull.set(Val);
+ }
+ }
+ if (!AttrNonNull.empty())
+ for (unsigned i = 0; i < NumArgs; ++i)
+ if (FD->getParamDecl(i) == PV &&
+ (AttrNonNull[i] || PV->hasAttr<NonNullAttr>())) {
+ std::string Str;
+ llvm::raw_string_ostream S(Str);
+ E->printPretty(S, nullptr, getPrintingPolicy());
+ unsigned DiagID = IsCompare ? diag::warn_nonnull_parameter_compare
+ : diag::warn_cast_nonnull_to_bool;
+ Diag(E->getExprLoc(), DiagID) << S.str() << E->getSourceRange()
+ << Range << IsEqual;
+ return;
+ }
+ }
+ }
+
QualType T = D->getType();
const bool IsArray = T->isArrayType();
const bool IsFunction = T->isFunctionType();
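For illustration: the new block flags null tests of parameters that are declared nonnull, unless the parameter has been reassigned first (the ModifiedNonNullParams guard). A sketch with hypothetical names; the diagnostic wording is approximate:

    __attribute__((nonnull)) int length(const char *s) {
      if (s == nullptr)  // flagged: 's' is declared nonnull, so this comparison is always false
        return 0;
      int n = 0;
      while (s[n]) ++n;
      return n;
    }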
@@ -6496,11 +6947,17 @@ void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
AnalyzeImplicitConversions(*this, E, CC);
}
+/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
+/// Input argument E is a logical expression.
+void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
+ ::CheckBoolLikeConversion(*this, E, CC);
+}
+
/// Diagnose when expression is an integer constant expression and its evaluation
/// results in integer overflow
void Sema::CheckForIntOverflow (Expr *E) {
- if (isa<BinaryOperator>(E->IgnoreParens()))
- E->EvaluateForOverflow(Context);
+ if (isa<BinaryOperator>(E->IgnoreParenCasts()))
+ E->IgnoreParenCasts()->EvaluateForOverflow(Context);
}
namespace {
@@ -6630,11 +7087,12 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
Self.ModAsSideEffect = &ModAsSideEffect;
}
~SequencedSubexpression() {
- for (unsigned I = 0, E = ModAsSideEffect.size(); I != E; ++I) {
- UsageInfo &U = Self.UsageMap[ModAsSideEffect[I].first];
- U.Uses[UK_ModAsSideEffect] = ModAsSideEffect[I].second;
- Self.addUsage(U, ModAsSideEffect[I].first,
- ModAsSideEffect[I].second.Use, UK_ModAsValue);
+ for (auto MI = ModAsSideEffect.rbegin(), ME = ModAsSideEffect.rend();
+ MI != ME; ++MI) {
+ UsageInfo &U = Self.UsageMap[MI->first];
+ auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect];
+ Self.addUsage(U, MI->first, SideEffectUsage.Use, UK_ModAsValue);
+ SideEffectUsage = MI->second;
}
Self.ModAsSideEffect = OldModAsSideEffect;
}
@@ -7867,6 +8325,96 @@ void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
}
}
+//===--- CHECK: Warn on self move with std::move. -------------------------===//
+
+/// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
+void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
+ SourceLocation OpLoc) {
+
+ if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc))
+ return;
+
+ if (!ActiveTemplateInstantiations.empty())
+ return;
+
+ // Strip parens and casts away.
+ LHSExpr = LHSExpr->IgnoreParenImpCasts();
+ RHSExpr = RHSExpr->IgnoreParenImpCasts();
+
+ // Check for a call expression
+ const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
+ if (!CE || CE->getNumArgs() != 1)
+ return;
+
+ // Check for a call to std::move
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD || !FD->isInStdNamespace() || !FD->getIdentifier() ||
+ !FD->getIdentifier()->isStr("move"))
+ return;
+
+ // Get argument from std::move
+ RHSExpr = CE->getArg(0);
+
+ const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
+ const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
+
+ // Two DeclRefExpr's, check that the decls are the same.
+ if (LHSDeclRef && RHSDeclRef) {
+ if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
+ return;
+ if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
+ RHSDeclRef->getDecl()->getCanonicalDecl())
+ return;
+
+ Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
+ << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+ return;
+ }
+
+ // Member variables require a different approach to check for self moves.
+ // MemberExprs are the same if every nested MemberExpr refers to the same
+ // Decl, and the base Exprs are either DeclRefExprs naming the same Decl or
+ // are both CXXThisExprs.
+ const Expr *LHSBase = LHSExpr;
+ const Expr *RHSBase = RHSExpr;
+ const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
+ const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
+ if (!LHSME || !RHSME)
+ return;
+
+ while (LHSME && RHSME) {
+ if (LHSME->getMemberDecl()->getCanonicalDecl() !=
+ RHSME->getMemberDecl()->getCanonicalDecl())
+ return;
+
+ LHSBase = LHSME->getBase();
+ RHSBase = RHSME->getBase();
+ LHSME = dyn_cast<MemberExpr>(LHSBase);
+ RHSME = dyn_cast<MemberExpr>(RHSBase);
+ }
+
+ LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
+ RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
+ if (LHSDeclRef && RHSDeclRef) {
+ if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
+ return;
+ if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
+ RHSDeclRef->getDecl()->getCanonicalDecl())
+ return;
+
+ Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
+ << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+ return;
+ }
+
+ if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
+ Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
+ << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+}
+
//===--- Layout compatibility ----------------------------------------------//
namespace {
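For illustration: DiagnoseSelfMove catches both plain variables and member chains whose bases resolve to the same declaration (or to 'this' on both sides). A sketch with hypothetical names, assuming the -Wself-move warning (or a group containing it) is enabled:

    #include <string>
    #include <utility>

    struct Entry {
      std::string name;
      void normalize() { name = std::move(name); }  // member moved to itself through 'this'
    };

    void reset(std::string &s) {
      s = std::move(s);  // variable moved to itself
    }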
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index 3d250e3bef11..48bdd2a7d82c 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -292,7 +292,7 @@ namespace {
void MaybeAddResult(Result R, DeclContext *CurContext = nullptr);
/// \brief Add a new result to this result set, where we already know
- /// the hiding declation (if any).
+ /// the hiding declaration (if any).
///
/// \param R the result to add (if it is unique).
///
@@ -894,7 +894,7 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
}
// Make sure that any given declaration only shows up in the result set once.
- if (!AllDeclsFound.insert(CanonDecl))
+ if (!AllDeclsFound.insert(CanonDecl).second)
return;
// If the filter is for nested-name-specifiers, then this result starts a
@@ -957,7 +957,7 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
return;
// Make sure that any given declaration only shows up in the result set once.
- if (!AllDeclsFound.insert(R.Declaration->getCanonicalDecl()))
+ if (!AllDeclsFound.insert(R.Declaration->getCanonicalDecl()).second)
return;
// If the filter is for nested-name-specifiers, then this result starts a
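For illustration: the repeated .insert(...).second updates in this file track an API change in the underlying LLVM containers, where insert() now reports whether the element was newly added via the .second member of a returned pair. The same idiom with std::set, used here only because it is self-contained:

    #include <set>
    #include <string>

    bool remember(std::set<std::string> &seen, const std::string &name) {
      return seen.insert(name).second;  // true only the first time 'name' is seen
    }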
@@ -2575,11 +2575,12 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
const MacroDirective *MD = PP.getMacroDirectiveHistory(Macro);
assert(MD && "Not a macro?");
const MacroInfo *MI = MD->getMacroInfo();
+ assert((!MD->isDefined() || MI) && "missing MacroInfo for define");
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(Macro->getName()));
- if (!MI->isFunctionLike())
+ if (!MI || !MI->isFunctionLike())
return Result.TakeString();
// Format a function-like macro with placeholders for the arguments.
@@ -3466,7 +3467,7 @@ static void AddObjCProperties(ObjCContainerDecl *Container,
// Add properties in this container.
for (const auto *P : Container->properties())
- if (AddedProperties.insert(P->getIdentifier()))
+ if (AddedProperties.insert(P->getIdentifier()).second)
Results.MaybeAddResult(Result(P, Results.getBasePriority(P), nullptr),
CurContext);
@@ -3477,7 +3478,7 @@ static void AddObjCProperties(ObjCContainerDecl *Container,
for (auto *M : Container->methods()) {
if (M->getSelector().isUnarySelector())
if (IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0))
- if (AddedProperties.insert(Name)) {
+ if (AddedProperties.insert(Name).second) {
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
AddResultTypeChunk(Context, Policy, M, Builder);
@@ -4235,7 +4236,8 @@ void Sema::CodeCompleteConstructorInitializer(
bool SawLastInitializer = Initializers.empty();
CXXRecordDecl *ClassDecl = Constructor->getParent();
for (const auto &Base : ClassDecl->bases()) {
- if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))) {
+ if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
+ .second) {
SawLastInitializer
= !Initializers.empty() &&
Initializers.back()->isBaseInitializer() &&
@@ -4258,7 +4260,8 @@ void Sema::CodeCompleteConstructorInitializer(
// Add completions for virtual base classes.
for (const auto &Base : ClassDecl->vbases()) {
- if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))) {
+ if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
+ .second) {
SawLastInitializer
= !Initializers.empty() &&
Initializers.back()->isBaseInitializer() &&
@@ -4281,7 +4284,8 @@ void Sema::CodeCompleteConstructorInitializer(
// Add completions for members.
for (auto *Field : ClassDecl->fields()) {
- if (!InitializedFields.insert(cast<FieldDecl>(Field->getCanonicalDecl()))) {
+ if (!InitializedFields.insert(cast<FieldDecl>(Field->getCanonicalDecl()))
+ .second) {
SawLastInitializer
= !Initializers.empty() &&
Initializers.back()->isAnyMemberInitializer() &&
@@ -4348,7 +4352,7 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
Var->hasAttr<BlocksAttr>())
continue;
- if (Known.insert(Var->getIdentifier()))
+ if (Known.insert(Var->getIdentifier()).second)
Results.AddResult(CodeCompletionResult(Var, CCP_LocalDeclaration),
CurContext, nullptr, false);
}
@@ -4817,7 +4821,7 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
if (!isAcceptableObjCMethod(M, WantKind, SelIdents, AllowSameLength))
continue;
- if (!Selectors.insert(M->getSelector()))
+ if (!Selectors.insert(M->getSelector()).second)
continue;
Result R = Result(M, Results.getBasePriority(M), nullptr);
@@ -5400,13 +5404,13 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
MEnd = SemaRef.MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = &M->second.second;
- MethList && MethList->Method;
+ MethList && MethList->getMethod();
MethList = MethList->getNext()) {
- if (!isAcceptableObjCMethod(MethList->Method, MK_Any, SelIdents))
+ if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
- Result R(MethList->Method, Results.getBasePriority(MethList->Method),
- nullptr);
+ Result R(MethList->getMethod(),
+ Results.getBasePriority(MethList->getMethod()), nullptr);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = false;
Results.MaybeAddResult(R, SemaRef.CurContext);
@@ -5573,16 +5577,16 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
MEnd = MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = &M->second.first;
- MethList && MethList->Method;
+ MethList && MethList->getMethod();
MethList = MethList->getNext()) {
- if (!isAcceptableObjCMethod(MethList->Method, MK_Any, SelIdents))
+ if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
- if (!Selectors.insert(MethList->Method->getSelector()))
+ if (!Selectors.insert(MethList->getMethod()->getSelector()).second)
continue;
- Result R(MethList->Method, Results.getBasePriority(MethList->Method),
- nullptr);
+ Result R(MethList->getMethod(),
+ Results.getBasePriority(MethList->getMethod()), nullptr);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = false;
Results.MaybeAddResult(R, CurContext);
@@ -5858,7 +5862,7 @@ void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
for (const auto *D : TU->decls())
if (const auto *Category = dyn_cast<ObjCCategoryDecl>(D))
- if (CategoryNames.insert(Category->getIdentifier()))
+ if (CategoryNames.insert(Category->getIdentifier()).second)
Results.AddResult(Result(Category, Results.getBasePriority(Category),
nullptr),
CurContext, nullptr, false);
@@ -5896,7 +5900,7 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
while (Class) {
for (const auto *Cat : Class->visible_categories()) {
if ((!IgnoreImplemented || !Cat->getImplementation()) &&
- CategoryNames.insert(Cat->getIdentifier()))
+ CategoryNames.insert(Cat->getIdentifier()).second)
Results.AddResult(Result(Cat, Results.getBasePriority(Cat), nullptr),
CurContext, nullptr, false);
}
@@ -6217,7 +6221,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// Add the normal accessor -(type)key.
if (IsInstanceMethod &&
- KnownSelectors.insert(Selectors.getNullarySelector(PropName)) &&
+ KnownSelectors.insert(Selectors.getNullarySelector(PropName)).second &&
ReturnTypeMatchesProperty && !Property->getGetterMethodDecl()) {
if (ReturnType.isNull())
AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
@@ -6238,7 +6242,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Property->getType()->isBooleanType())))) {
std::string SelectorName = (Twine("is") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
+ .second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("BOOL");
@@ -6257,7 +6262,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
!Property->getSetterMethodDecl()) {
std::string SelectorName = (Twine("set") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6309,7 +6314,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
(ReturnType.isNull() || ReturnType->isIntegerType())) {
std::string SelectorName = (Twine("countOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
+ .second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSUInteger");
@@ -6332,7 +6338,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
std::string SelectorName
= (Twine("objectIn") + UpperKey + "AtIndex").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("id");
@@ -6359,7 +6365,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
std::string SelectorName
= (Twine(Property->getName()) + "AtIndexes").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSArray *");
@@ -6384,7 +6390,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
&Context.Idents.get("range")
};
- if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds))) {
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6418,7 +6424,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
&Context.Idents.get(SelectorName)
};
- if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds))) {
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6450,7 +6456,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
&Context.Idents.get("atIndexes")
};
- if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds))) {
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6478,7 +6484,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
std::string SelectorName
= (Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6500,7 +6506,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
std::string SelectorName
= (Twine("remove") + UpperKey + "AtIndexes").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6526,7 +6532,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
&Context.Idents.get("withObject")
};
- if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds))) {
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6559,7 +6565,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
&Context.Idents.get(SelectorName2)
};
- if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds))) {
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6592,7 +6598,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
->getName() == "NSEnumerator"))) {
std::string SelectorName = (Twine("enumeratorOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
+ .second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSEnumerator *");
@@ -6610,7 +6617,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
(ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
std::string SelectorName = (Twine("memberOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("object-type");
@@ -6641,7 +6648,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
std::string SelectorName
= (Twine("add") + UpperKey + Twine("Object")).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6663,7 +6670,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("add") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6685,7 +6692,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
std::string SelectorName
= (Twine("remove") + UpperKey + Twine("Object")).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6707,7 +6714,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("remove") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6728,7 +6735,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("intersect") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
@@ -6756,7 +6763,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
std::string SelectorName
= (Twine("keyPathsForValuesAffecting") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
+ .second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSSet *");
@@ -6777,7 +6785,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
std::string SelectorName
= (Twine("automaticallyNotifiesObserversOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
- if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))) {
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
+ .second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("BOOL");
@@ -6985,16 +6994,18 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
M != MEnd; ++M) {
for (ObjCMethodList *MethList = IsInstanceMethod ? &M->second.first :
&M->second.second;
- MethList && MethList->Method;
+ MethList && MethList->getMethod();
MethList = MethList->getNext()) {
- if (!isAcceptableObjCMethod(MethList->Method, MK_Any, SelIdents))
+ if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
if (AtParameterName) {
// Suggest parameter names we've seen before.
unsigned NumSelIdents = SelIdents.size();
- if (NumSelIdents && NumSelIdents <= MethList->Method->param_size()) {
- ParmVarDecl *Param = MethList->Method->parameters()[NumSelIdents-1];
+ if (NumSelIdents &&
+ NumSelIdents <= MethList->getMethod()->param_size()) {
+ ParmVarDecl *Param =
+ MethList->getMethod()->parameters()[NumSelIdents - 1];
if (Param->getIdentifier()) {
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
@@ -7007,8 +7018,8 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
continue;
}
- Result R(MethList->Method, Results.getBasePriority(MethList->Method),
- nullptr);
+ Result R(MethList->getMethod(),
+ Results.getBasePriority(MethList->getMethod()), nullptr);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = false;
R.DeclaringEntity = true;
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index 87162273bbe6..007470344f19 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -152,7 +152,10 @@ static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
auto *TD = TST->getTemplateName().getAsTemplateDecl();
if (!TD)
continue;
- auto *BasePrimaryTemplate = cast<CXXRecordDecl>(TD->getTemplatedDecl());
+ auto *BasePrimaryTemplate =
+ dyn_cast_or_null<CXXRecordDecl>(TD->getTemplatedDecl());
+ if (!BasePrimaryTemplate)
+ continue;
// FIXME: Allow lookup into non-dependent bases of dependent bases, possibly
// by calling or integrating with the main LookupQualifiedName mechanism.
for (NamedDecl *ND : BasePrimaryTemplate->lookup(&II)) {
@@ -283,10 +286,10 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
if (CorrectedII) {
- TypeNameValidatorCCC Validator(true, isClassName);
- TypoCorrection Correction = CorrectTypo(Result.getLookupNameInfo(),
- Kind, S, SS, Validator,
- CTK_ErrorRecovery);
+ TypoCorrection Correction = CorrectTypo(
+ Result.getLookupNameInfo(), Kind, S, SS,
+ llvm::make_unique<TypeNameValidatorCCC>(true, isClassName),
+ CTK_ErrorRecovery);
IdentifierInfo *NewII = Correction.getCorrectionAsIdentifierInfo();
TemplateTy Template;
bool MemberOfUnknownSpecialization;
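
This and the later CorrectTypo call sites switch from passing a stack-allocated correction callback by reference to handing the callee ownership through a unique_ptr (llvm::make_unique in the patch). A sketch of the ownership transfer, with hypothetical Validator types standing in for the CorrectionCandidateCallback hierarchy:

    #include <memory>

    struct Validator {                 // stands in for CorrectionCandidateCallback
      virtual ~Validator() {}
    };
    struct TypeNameValidator : Validator {
      TypeNameValidator(bool, bool) {} // flags analogous to (AllowKeywords, IsClassName)
    };

    // The callee now owns the callback for as long as it needs it.
    static void correctTypo(std::unique_ptr<Validator> CCC) { (void)CCC; }

    static void caller() {
      // llvm::make_unique in the patch; plain unique_ptr construction here.
      correctTypo(std::unique_ptr<Validator>(new TypeNameValidator(true, false)));
    }
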
@@ -377,6 +380,7 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
DiagnoseUseOfDecl(IIDecl, NameLoc);
T = Context.getTypeDeclType(TD);
+ MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
// NOTE: avoid constructing an ElaboratedType(Loc) if this is a
// constructor or destructor name (in such a case, the scope specifier
@@ -494,6 +498,9 @@ DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
/// @endcode
bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) {
if (CurContext->isRecord()) {
+ if (SS->getScopeRep()->getKind() == NestedNameSpecifier::Super)
+ return true;
+
const Type *Ty = SS->getScopeRep()->getAsType();
CXXRecordDecl *RD = cast<CXXRecordDecl>(CurContext);
@@ -516,10 +523,11 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
// There may have been a typo in the name of the type. Look up typo
// results, in case we have something that we can suggest.
- TypeNameValidatorCCC Validator(false, false, AllowClassTemplates);
- if (TypoCorrection Corrected = CorrectTypo(DeclarationNameInfo(II, IILoc),
- LookupOrdinaryName, S, SS,
- Validator, CTK_ErrorRecovery)) {
+ if (TypoCorrection Corrected =
+ CorrectTypo(DeclarationNameInfo(II, IILoc), LookupOrdinaryName, S, SS,
+ llvm::make_unique<TypeNameValidatorCCC>(
+ false, false, AllowClassTemplates),
+ CTK_ErrorRecovery)) {
if (Corrected.isKeyword()) {
// We corrected to a keyword.
diagnoseTypo(Corrected, PDiag(diag::err_unknown_typename_suggest) << II);
@@ -679,13 +687,11 @@ static ParsedType buildNestedType(Sema &S, CXXScopeSpec &SS,
return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
-Sema::NameClassification Sema::ClassifyName(Scope *S,
- CXXScopeSpec &SS,
- IdentifierInfo *&Name,
- SourceLocation NameLoc,
- const Token &NextToken,
- bool IsAddressOfOperand,
- CorrectionCandidateCallback *CCC) {
+Sema::NameClassification
+Sema::ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
+ SourceLocation NameLoc, const Token &NextToken,
+ bool IsAddressOfOperand,
+ std::unique_ptr<CorrectionCandidateCallback> CCC) {
DeclarationNameInfo NameInfo(Name, NameLoc);
ObjCMethodDecl *CurMethod = getCurMethodDecl();
@@ -762,7 +768,7 @@ Corrected:
SecondTry = true;
if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
Result.getLookupKind(), S,
- &SS, *CCC,
+ &SS, std::move(CCC),
CTK_ErrorRecovery)) {
unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest;
unsigned QualifiedDiag = diag::err_no_member_suggest;
@@ -925,6 +931,7 @@ Corrected:
NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl();
if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) {
DiagnoseUseOfDecl(Type, NameLoc);
+ MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
QualType T = Context.getTypeDeclType(Type);
if (SS.isNotEmpty())
return buildNestedType(*this, SS, T, NameLoc);
@@ -1392,10 +1399,22 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
if (isa<LabelDecl>(D))
return true;
+
+ // Except for labels, we only care about unused decls that are local to
+ // functions.
+ bool WithinFunction = D->getDeclContext()->isFunctionOrMethod();
+ if (const auto *R = dyn_cast<CXXRecordDecl>(D->getDeclContext()))
+ // For dependent types, the diagnostic is deferred.
+ WithinFunction =
+ WithinFunction || (R->isLocalClass() && !R->isDependentType());
+ if (!WithinFunction)
+ return false;
+
+ if (isa<TypedefNameDecl>(D))
+ return true;
// White-list anything that isn't a local variable.
- if (!isa<VarDecl>(D) || isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D) ||
- !D->getDeclContext()->isFunctionOrMethod())
+ if (!isa<VarDecl>(D) || isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D))
return false;
// Types of valid local variables should be complete, so this should succeed.
@@ -1458,11 +1477,30 @@ static void GenerateFixForUnusedDecl(const NamedDecl *D, ASTContext &Ctx,
return;
}
+void Sema::DiagnoseUnusedNestedTypedefs(const RecordDecl *D) {
+ if (D->getTypeForDecl()->isDependentType())
+ return;
+
+ for (auto *TmpD : D->decls()) {
+ if (const auto *T = dyn_cast<TypedefNameDecl>(TmpD))
+ DiagnoseUnusedDecl(T);
+ else if(const auto *R = dyn_cast<RecordDecl>(TmpD))
+ DiagnoseUnusedNestedTypedefs(R);
+ }
+}
+
/// DiagnoseUnusedDecl - Emit warnings about declarations that are not used
/// unless they are marked attr(unused).
void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
if (!ShouldDiagnoseUnusedDecl(D))
return;
+
+ if (auto *TD = dyn_cast<TypedefNameDecl>(D)) {
+ // typedefs can be referenced later on, so the diagnostics are emitted
+ // at end-of-translation-unit.
+ UnusedLocalTypedefNameCandidates.insert(TD);
+ return;
+ }
FixItHint Hint;
GenerateFixForUnusedDecl(D, Context, Hint);
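
The new UnusedLocalTypedefNameCandidates bookkeeping and DiagnoseUnusedNestedTypedefs defer typedef diagnostics to the end of the translation unit; an illustration of the declarations they are meant to flag (assuming the usual unused-local-typedef warning group):

    static void example() {
      typedef int unused_t;          // local typedef, never referenced: flagged
      using also_unused = long;      // alias-declarations are TypedefNameDecls too
      struct local { typedef int nested_unused; }; // found via the nested walk
      (void)sizeof(local);
    }
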
@@ -1481,8 +1519,14 @@ void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
// Verify that we have no forward references left. If so, there was a goto
// or address of a label taken, but no definition of it. Label fwd
- // definitions are indicated with a null substmt.
- if (L->getStmt() == nullptr)
+ // definitions are indicated with a null substmt; for MS inline assembly
+ // labels, an unresolved label indicates a forward reference.
+ bool Diagnose = false;
+ if (L->isMSAsmLabel())
+ Diagnose = !L->isResolvedMSAsmLabel();
+ else
+ Diagnose = L->getStmt() == nullptr;
+ if (Diagnose)
S.Diag(L->getLocation(), diag::err_undeclared_label_use) <<L->getDeclName();
}
@@ -1502,8 +1546,11 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
if (!D->getDeclName()) continue;
// Diagnose unused variables in this scope.
- if (!S->hasUnrecoverableErrorOccurred())
+ if (!S->hasUnrecoverableErrorOccurred()) {
DiagnoseUnusedDecl(D);
+ if (const auto *RD = dyn_cast<RecordDecl>(D))
+ DiagnoseUnusedNestedTypedefs(RD);
+ }
// If this was a forward reference to a label, verify it was defined.
if (LabelDecl *LD = dyn_cast<LabelDecl>(D))
@@ -1537,10 +1584,10 @@ ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id,
if (!IDecl && DoTypoCorrection) {
// Perform typo correction at the given location, but only if we
// find an Objective-C class name.
- DeclFilterCCC<ObjCInterfaceDecl> Validator;
- if (TypoCorrection C = CorrectTypo(DeclarationNameInfo(Id, IdLoc),
- LookupOrdinaryName, TUScope, nullptr,
- Validator, CTK_ErrorRecovery)) {
+ if (TypoCorrection C = CorrectTypo(
+ DeclarationNameInfo(Id, IdLoc), LookupOrdinaryName, TUScope, nullptr,
+ llvm::make_unique<DeclFilterCCC<ObjCInterfaceDecl>>(),
+ CTK_ErrorRecovery)) {
diagnoseTypo(C, PDiag(diag::err_undef_interface_suggest) << Id);
IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>();
Id = IDecl->getIdentifier();
@@ -1620,32 +1667,30 @@ static StringRef getHeaderName(ASTContext::GetBuiltinTypeError Error) {
/// file scope. lazily create a decl for it. ForRedeclaration is true
/// if we're creating this built-in in anticipation of redeclaring the
/// built-in.
-NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned bid,
+NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc) {
LookupPredefedObjCSuperType(*this, S, II);
-
- Builtin::ID BID = (Builtin::ID)bid;
ASTContext::GetBuiltinTypeError Error;
- QualType R = Context.GetBuiltinType(BID, Error);
+ QualType R = Context.GetBuiltinType(ID, Error);
if (Error) {
if (ForRedeclaration)
Diag(Loc, diag::warn_implicit_decl_requires_sysheader)
<< getHeaderName(Error)
- << Context.BuiltinInfo.GetName(BID);
+ << Context.BuiltinInfo.GetName(ID);
return nullptr;
}
- if (!ForRedeclaration && Context.BuiltinInfo.isPredefinedLibFunction(BID)) {
+ if (!ForRedeclaration && Context.BuiltinInfo.isPredefinedLibFunction(ID)) {
Diag(Loc, diag::ext_implicit_lib_function_decl)
- << Context.BuiltinInfo.GetName(BID)
+ << Context.BuiltinInfo.GetName(ID)
<< R;
- if (Context.BuiltinInfo.getHeaderName(BID) &&
+ if (Context.BuiltinInfo.getHeaderName(ID) &&
!Diags.isIgnored(diag::ext_implicit_lib_function_decl, Loc))
Diag(Loc, diag::note_include_header_or_declare)
- << Context.BuiltinInfo.getHeaderName(BID)
- << Context.BuiltinInfo.GetName(BID);
+ << Context.BuiltinInfo.getHeaderName(ID)
+ << Context.BuiltinInfo.GetName(ID);
}
DeclContext *Parent = Context.getTranslationUnitDecl();
@@ -1725,6 +1770,43 @@ static void filterNonConflictingPreviousDecls(ASTContext &context,
filter.done();
}
+/// Typedef declarations don't have linkage, but they still denote the same
+/// entity if their types are the same.
+/// FIXME: This is notionally doing the same thing as ASTReaderDecl's
+/// isSameEntity.
+static void filterNonConflictingPreviousTypedefDecls(ASTContext &Context,
+ TypedefNameDecl *Decl,
+ LookupResult &Previous) {
+ // This is only interesting when modules are enabled.
+ if (!Context.getLangOpts().Modules)
+ return;
+
+ // Empty sets are uninteresting.
+ if (Previous.empty())
+ return;
+
+ LookupResult::Filter Filter = Previous.makeFilter();
+ while (Filter.hasNext()) {
+ NamedDecl *Old = Filter.next();
+
+ // Non-hidden declarations are never ignored.
+ if (!Old->isHidden())
+ continue;
+
+ // Declarations of the same entity are not ignored, even if they have
+ // different linkages.
+ if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old))
+ if (Context.hasSameType(OldTD->getUnderlyingType(),
+ Decl->getUnderlyingType()))
+ continue;
+
+ if (!Old->isExternallyVisible())
+ Filter.erase();
+ }
+
+ Filter.done();
+}
+
bool Sema::isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New) {
QualType OldType;
if (TypedefNameDecl *OldTypedef = dyn_cast<TypedefNameDecl>(Old))
@@ -2072,6 +2154,14 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
NewAttr = S.mergeMSInheritanceAttr(D, IA->getRange(), IA->getBestCase(),
AttrSpellingListIndex,
IA->getSemanticSpelling());
+ else if (const auto *AA = dyn_cast<AlwaysInlineAttr>(Attr))
+ NewAttr = S.mergeAlwaysInlineAttr(D, AA->getRange(),
+ &S.Context.Idents.get(AA->getSpelling()),
+ AttrSpellingListIndex);
+ else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
+ NewAttr = S.mergeMinSizeAttr(D, MA->getRange(), AttrSpellingListIndex);
+ else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr))
+ NewAttr = S.mergeOptimizeNoneAttr(D, OA->getRange(), AttrSpellingListIndex);
else if (isa<AlignedAttr>(Attr))
// AlignedAttrs are handled separately, because we need to handle all
// such attributes on a declaration at the same time.
@@ -3149,8 +3239,15 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
}
// Merge the types.
- MergeVarDeclTypes(New, Old, mergeTypeWithPrevious(*this, New, Old, Previous));
+ VarDecl *MostRecent = Old->getMostRecentDecl();
+ if (MostRecent != Old) {
+ MergeVarDeclTypes(New, MostRecent,
+ mergeTypeWithPrevious(*this, New, MostRecent, Previous));
+ if (New->isInvalidDecl())
+ return;
+ }
+ MergeVarDeclTypes(New, Old, mergeTypeWithPrevious(*this, New, Old, Previous));
if (New->isInvalidDecl())
return;
@@ -3195,12 +3292,12 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
// Check if extern is followed by non-extern and vice-versa.
if (New->hasExternalStorage() &&
- !Old->hasLinkage() && Old->isLocalVarDecl()) {
+ !Old->hasLinkage() && Old->isLocalVarDeclOrParm()) {
Diag(New->getLocation(), diag::err_extern_non_extern) << New->getDeclName();
Diag(OldLocation, PrevDiag);
return New->setInvalidDecl();
}
- if (Old->hasLinkage() && New->isLocalVarDecl() &&
+ if (Old->hasLinkage() && New->isLocalVarDeclOrParm() &&
!New->hasExternalStorage()) {
Diag(New->getLocation(), diag::err_non_extern_extern) << New->getDeclName();
Diag(OldLocation, PrevDiag);
@@ -3407,21 +3504,39 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
}
}
- // Check for Microsoft C extension: anonymous struct member.
- if (getLangOpts().MicrosoftExt && !getLangOpts().CPlusPlus &&
- CurContext->isRecord() &&
+ // C11 6.7.2.1p2:
+ // A struct-declaration that does not declare an anonymous structure or
+ // anonymous union shall contain a struct-declarator-list.
+ //
+ // This rule also existed in C89 and C99; the grammar for struct-declaration
+ // did not permit a struct-declaration without a struct-declarator-list.
+ if (!getLangOpts().CPlusPlus && CurContext->isRecord() &&
DS.getStorageClassSpec() == DeclSpec::SCS_unspecified) {
- // Handle 2 kinds of anonymous struct:
+ // Check for Microsoft C extension: anonymous struct/union member.
+ // Handle 2 kinds of anonymous struct/union:
// struct STRUCT;
+ // union UNION;
// and
// STRUCT_TYPE; <- where STRUCT_TYPE is a typedef struct.
- RecordDecl *Record = dyn_cast_or_null<RecordDecl>(Tag);
- if ((Record && Record->getDeclName() && !Record->isCompleteDefinition()) ||
- (DS.getTypeSpecType() == DeclSpec::TST_typename &&
- DS.getRepAsType().get()->isStructureType())) {
- Diag(DS.getLocStart(), diag::ext_ms_anonymous_struct)
- << DS.getSourceRange();
- return BuildMicrosoftCAnonymousStruct(S, DS, Record);
+ // UNION_TYPE; <- where UNION_TYPE is a typedef union.
+ if ((Tag && Tag->getDeclName()) ||
+ DS.getTypeSpecType() == DeclSpec::TST_typename) {
+ RecordDecl *Record = nullptr;
+ if (Tag)
+ Record = dyn_cast<RecordDecl>(Tag);
+ else if (const RecordType *RT =
+ DS.getRepAsType().get()->getAsStructureType())
+ Record = RT->getDecl();
+ else if (const RecordType *UT = DS.getRepAsType().get()->getAsUnionType())
+ Record = UT->getDecl();
+
+ if (Record && getLangOpts().MicrosoftExt) {
+ Diag(DS.getLocStart(), diag::ext_ms_anonymous_record)
+ << Record->isUnion() << DS.getSourceRange();
+ return BuildMicrosoftCAnonymousStruct(S, DS, Record);
+ }
+
+ DeclaresAnything = false;
}
}
@@ -3622,10 +3737,12 @@ static bool InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S,
for (unsigned i = 0; i < Chaining.size(); i++)
NamedChain[i] = Chaining[i];
- IndirectFieldDecl* IndirectField =
- IndirectFieldDecl::Create(SemaRef.Context, Owner, VD->getLocation(),
- VD->getIdentifier(), VD->getType(),
- NamedChain, Chaining.size());
+ IndirectFieldDecl *IndirectField = IndirectFieldDecl::Create(
+ SemaRef.Context, Owner, VD->getLocation(), VD->getIdentifier(),
+ VD->getType(), NamedChain, Chaining.size());
+
+ for (const auto *Attr : VD->attrs())
+ IndirectField->addAttr(Attr->clone(SemaRef.Context));
IndirectField->setAccess(AS);
IndirectField->setImplicit();
@@ -3893,7 +4010,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
FieldCollector->Add(cast<FieldDecl>(Anon));
} else {
DeclSpec::SCS SCSpec = DS.getStorageClassSpec();
- VarDecl::StorageClass SC = StorageClassSpecToVarDeclStorageClass(DS);
+ StorageClass SC = StorageClassSpecToVarDeclStorageClass(DS);
if (SCSpec == DeclSpec::SCS_mutable) {
// mutable can only appear on non-static class members, so it's always
// an error here
@@ -3962,28 +4079,28 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
///
/// void foo() {
/// B var;
-/// var.a = 3;
+/// var.a = 3;
/// }
///
Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record) {
-
- // If there is no Record, get the record via the typedef.
- if (!Record)
- Record = DS.getRepAsType().get()->getAsStructureType()->getDecl();
+ assert(Record && "expected a record!");
// Mock up a declarator.
Declarator Dc(DS, Declarator::TypeNameContext);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct");
+ auto *ParentDecl = cast<RecordDecl>(CurContext);
+ QualType RecTy = Context.getTypeDeclType(Record);
+
// Create a declaration for this anonymous struct.
NamedDecl *Anon = FieldDecl::Create(Context,
- cast<RecordDecl>(CurContext),
+ ParentDecl,
DS.getLocStart(),
DS.getLocStart(),
/*IdentifierInfo=*/nullptr,
- Context.getTypeDeclType(Record),
+ RecTy,
TInfo,
/*BitWidth=*/nullptr, /*Mutable=*/false,
/*InitStyle=*/ICIS_NoInit);
@@ -3999,10 +4116,13 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
Chain.push_back(Anon);
RecordDecl *RecordDef = Record->getDefinition();
- if (!RecordDef || InjectAnonymousStructOrUnionMembers(*this, S, CurContext,
- RecordDef, AS_none,
- Chain, true))
+ if (RequireCompleteType(Anon->getLocation(), RecTy,
+ diag::err_field_incomplete) ||
+ InjectAnonymousStructOrUnionMembers(*this, S, CurContext, RecordDef,
+ AS_none, Chain, true)) {
Anon->setInvalidDecl();
+ ParentDecl->setInvalidDecl();
+ }
return Anon;
}
@@ -4835,7 +4955,7 @@ Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
// in an outer scope, it isn't the same thing.
FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage*/false,
/*AllowInlineNamespace*/false);
- filterNonConflictingPreviousDecls(Context, NewTD, Previous);
+ filterNonConflictingPreviousTypedefDecls(Context, NewTD, Previous);
if (!Previous.empty()) {
Redeclaration = true;
MergeTypedefNameDecl(NewTD, Previous);
@@ -4995,14 +5115,7 @@ static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) {
}
// dll attributes require external linkage.
- if (const DLLImportAttr *Attr = ND.getAttr<DLLImportAttr>()) {
- if (!ND.isExternallyVisible()) {
- S.Diag(ND.getLocation(), diag::err_attribute_dll_not_extern)
- << &ND << Attr;
- ND.setInvalidDecl();
- }
- }
- if (const DLLExportAttr *Attr = ND.getAttr<DLLExportAttr>()) {
+ if (const InheritableAttr *Attr = getDLLAttr(&ND)) {
if (!ND.isExternallyVisible()) {
S.Diag(ND.getLocation(), diag::err_attribute_dll_not_extern)
<< &ND << Attr;
@@ -5064,17 +5177,22 @@ static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
}
// A redeclaration is not allowed to drop a dllimport attribute, the only
- // exception being inline function definitions.
+ // exceptions being inline function definitions, local extern declarations,
+ // and qualified friend declarations.
// NB: MSVC converts such a declaration to dllexport.
- bool IsInline = false, IsStaticDataMember = false;
+ bool IsInline = false, IsStaticDataMember = false, IsQualifiedFriend = false;
if (const auto *VD = dyn_cast<VarDecl>(NewDecl))
// Ignore static data because out-of-line definitions are diagnosed
// separately.
IsStaticDataMember = VD->isStaticDataMember();
- else if (const auto *FD = dyn_cast<FunctionDecl>(NewDecl))
+ else if (const auto *FD = dyn_cast<FunctionDecl>(NewDecl)) {
IsInline = FD->isInlined();
+ IsQualifiedFriend = FD->getQualifier() &&
+ FD->getFriendObjectKind() == Decl::FOK_Declared;
+ }
- if (OldImportAttr && !HasNewAttr && !IsInline && !IsStaticDataMember) {
+ if (OldImportAttr && !HasNewAttr && !IsInline && !IsStaticDataMember &&
+ !NewDecl->isLocalExternDecl() && !IsQualifiedFriend) {
S.Diag(NewDecl->getLocation(),
diag::warn_redeclaration_without_attribute_prev_attribute_ignored)
<< NewDecl << OldImportAttr;
@@ -5082,6 +5200,14 @@ static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
S.Diag(OldImportAttr->getLocation(), diag::note_previous_attribute);
OldDecl->dropAttr<DLLImportAttr>();
NewDecl->dropAttr<DLLImportAttr>();
+ } else if (IsInline && OldImportAttr &&
+ !S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ // In MinGW, seeing a function declared inline drops the dllimport attribute.
+ OldDecl->dropAttr<DLLImportAttr>();
+ NewDecl->dropAttr<DLLImportAttr>();
+ S.Diag(NewDecl->getLocation(),
+ diag::warn_dllimport_dropped_from_inline_function)
+ << NewDecl << OldImportAttr;
}
}
@@ -5222,8 +5348,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
DeclarationName Name = GetNameForDeclarator(D).getName();
DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpec();
- VarDecl::StorageClass SC =
- StorageClassSpecToVarDeclStorageClass(D.getDeclSpec());
+ StorageClass SC = StorageClassSpecToVarDeclStorageClass(D.getDeclSpec());
// dllimport globals without explicit storage class are treated as extern. We
// have to change the storage class this early to get the right DeclContext.
@@ -5434,7 +5559,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Only C++1y supports variable templates (N3651).
Diag(D.getIdentifierLoc(),
- getLangOpts().CPlusPlus1y
+ getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_variable_template
: diag::ext_variable_template);
}
@@ -5503,22 +5628,20 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewVD->setLocalExternDecl();
if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec()) {
- if (NewVD->hasLocalStorage()) {
- // C++11 [dcl.stc]p4:
- // When thread_local is applied to a variable of block scope the
- // storage-class-specifier static is implied if it does not appear
- // explicitly.
- // Core issue: 'static' is not implied if the variable is declared
- // 'extern'.
- if (SCSpec == DeclSpec::SCS_unspecified &&
- TSCS == DeclSpec::TSCS_thread_local &&
- DC->isFunctionOrMethod())
- NewVD->setTSCSpec(TSCS);
- else
- Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
- diag::err_thread_non_global)
- << DeclSpec::getSpecifierName(TSCS);
- } else if (!Context.getTargetInfo().isTLSSupported())
+ // C++11 [dcl.stc]p4:
+ // When thread_local is applied to a variable of block scope the
+ // storage-class-specifier static is implied if it does not appear
+ // explicitly.
+ // Core issue: 'static' is not implied if the variable is declared
+ // 'extern'.
+ if (NewVD->hasLocalStorage() &&
+ (SCSpec != DeclSpec::SCS_unspecified ||
+ TSCS != DeclSpec::TSCS_thread_local ||
+ !DC->isFunctionOrMethod()))
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_thread_non_global)
+ << DeclSpec::getSpecifierName(TSCS);
+ else if (!Context.getTargetInfo().isTLSSupported())
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_unsupported);
else
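
The restructured branch above implements C++11 [dcl.stc]p4; a short illustration of which block-scope forms are accepted (exact diagnostics depend on the target's TLS support):

    static void tls_examples() {
      thread_local int a = 0;     // OK: block-scope thread_local implies 'static'
      static __thread int b = 0;  // OK: GNU __thread with an explicit 'static'
      // __thread int c = 0;      // rejected: no 'static' is implied for __thread
      (void)a; (void)b;
    }
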
@@ -6198,7 +6321,7 @@ static void ReportOverrides(Sema& S, unsigned DiagID, const CXXMethodDecl *MD,
/// AddOverriddenMethods - See if a method overrides any in the base classes,
/// and if so, check that it's a valid override and remember it.
bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
- // Look for virtual methods in base classes that this method might override.
+ // Look for methods in base classes that this method might override.
CXXBasePaths Paths;
FindOverriddenMethodData Data;
Data.Method = MD;
@@ -6320,8 +6443,6 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
assert(!Prev.isAmbiguous() &&
"Cannot have an ambiguity in previous-declaration lookup");
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
- DifferentNameValidatorCCC Validator(SemaRef.Context, NewFD,
- MD ? MD->getParent() : nullptr);
if (!Prev.empty()) {
for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end();
Func != FuncEnd; ++Func) {
@@ -6337,9 +6458,11 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
}
// If the qualified name lookup yielded nothing, try typo correction
} else if ((Correction = SemaRef.CorrectTypo(
- Prev.getLookupNameInfo(), Prev.getLookupKind(), S,
- &ExtraArgs.D.getCXXScopeSpec(), Validator,
- Sema::CTK_ErrorRecovery, IsLocalFriend ? nullptr : NewDC))) {
+ Prev.getLookupNameInfo(), Prev.getLookupKind(), S,
+ &ExtraArgs.D.getCXXScopeSpec(),
+ llvm::make_unique<DifferentNameValidatorCCC>(
+ SemaRef.Context, NewFD, MD ? MD->getParent() : nullptr),
+ Sema::CTK_ErrorRecovery, IsLocalFriend ? nullptr : NewDC))) {
// Set up everything for the call to ActOnFunctionDeclarator
ExtraArgs.D.SetIdentifier(Correction.getCorrectionAsIdentifierInfo(),
ExtraArgs.D.getIdentifierLoc());
@@ -6436,8 +6559,7 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
return nullptr;
}
-static FunctionDecl::StorageClass getFunctionStorageClass(Sema &SemaRef,
- Declarator &D) {
+static StorageClass getFunctionStorageClass(Sema &SemaRef, Declarator &D) {
switch (D.getDeclSpec().getStorageClassSpec()) {
default: llvm_unreachable("Unknown storage class!");
case DeclSpec::SCS_auto:
@@ -6475,7 +6597,7 @@ static FunctionDecl::StorageClass getFunctionStorageClass(Sema &SemaRef,
static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
DeclContext *DC, QualType &R,
TypeSourceInfo *TInfo,
- FunctionDecl::StorageClass SC,
+ StorageClass SC,
bool &IsVirtualOkay) {
DeclarationNameInfo NameInfo = SemaRef.GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
@@ -6500,9 +6622,6 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
if (D.isInvalidType())
NewFD->setInvalidDecl();
- // Set the lexical context.
- NewFD->setLexicalDeclContext(SemaRef.CurContext);
-
return NewFD;
}
@@ -6602,6 +6721,11 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
IsVirtualOkay = !Ret->isStatic();
return Ret;
} else {
+ bool isFriend =
+ SemaRef.getLangOpts().CPlusPlus && D.getDeclSpec().isFriendSpecified();
+ if (!isFriend && SemaRef.CurContext->isRecord())
+ return nullptr;
+
// Determine whether the function was written with a
// prototype. This is true when:
// - we're in C++ (where every function has a prototype),
@@ -6655,7 +6779,7 @@ static void checkIsValidOpenCLKernelParameter(
Sema &S,
Declarator &D,
ParmVarDecl *Param,
- llvm::SmallPtrSet<const Type *, 16> &ValidTypes) {
+ llvm::SmallPtrSetImpl<const Type *> &ValidTypes) {
QualType PT = Param->getType();
// Cache the valid types we encounter to avoid rechecking structs that are
@@ -6802,7 +6926,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// TODO: consider using NameInfo for diagnostic.
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
- FunctionDecl::StorageClass SC = getFunctionStorageClass(*this, D);
+ StorageClass SC = getFunctionStorageClass(*this, D);
if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec())
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
@@ -6990,12 +7114,12 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->setVirtualAsWritten(true);
}
- if (getLangOpts().CPlusPlus1y &&
+ if (getLangOpts().CPlusPlus14 &&
NewFD->getReturnType()->isUndeducedType())
Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_auto_fn_virtual);
}
- if (getLangOpts().CPlusPlus1y &&
+ if (getLangOpts().CPlusPlus14 &&
(NewFD->isDependentContext() ||
(isFriend && CurContext->isDependentContext())) &&
NewFD->getReturnType()->isUndeducedType()) {
@@ -7126,12 +7250,10 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
const FunctionProtoType *FPT = R->getAs<FunctionProtoType>();
if ((Name.getCXXOverloadedOperator() == OO_Delete ||
Name.getCXXOverloadedOperator() == OO_Array_Delete) &&
- getLangOpts().CPlusPlus11 && FPT && !FPT->hasExceptionSpec()) {
- FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
- EPI.ExceptionSpecType = EST_BasicNoexcept;
- NewFD->setType(Context.getFunctionType(FPT->getReturnType(),
- FPT->getParamTypes(), EPI));
- }
+ getLangOpts().CPlusPlus11 && FPT && !FPT->hasExceptionSpec())
+ NewFD->setType(Context.getFunctionType(
+ FPT->getReturnType(), FPT->getParamTypes(),
+ FPT->getExtProtoInfo().withExceptionSpec(EST_BasicNoexcept)));
}
// Filter out previous declarations that don't match the scope.
@@ -7232,7 +7354,9 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
CodeSegStack.CurrentValue->getString(),
CodeSegStack.CurrentPragmaLocation));
if (UnifySection(CodeSegStack.CurrentValue->getString(),
- PSF_Implicit | PSF_Execute | PSF_Read, NewFD))
+ ASTContext::PSF_Implicit | ASTContext::PSF_Execute |
+ ASTContext::PSF_Read,
+ NewFD))
NewFD->dropAttr<SectionAttr>();
}
@@ -7283,6 +7407,22 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
Previous.getResultKind() != LookupResult::FoundOverloaded) &&
"previous declaration set still overloaded");
+
+ // Diagnose no-prototype function declarations with calling conventions that
+ // don't support variadic calls. Only do this in C and do it after merging
+ // possibly prototyped redeclarations.
+ const FunctionType *FT = NewFD->getType()->castAs<FunctionType>();
+ if (isa<FunctionNoProtoType>(FT) && !D.isFunctionDefinition()) {
+ CallingConv CC = FT->getExtInfo().getCC();
+ if (!supportsVariadicCall(CC)) {
+ // Windows system headers sometimes accidentally use stdcall without
+ // (void) parameters, so we relax this to a warning.
+ int DiagID =
+ CC == CC_X86StdCall ? diag::warn_cconv_knr : diag::err_cconv_knr;
+ Diag(NewFD->getLocation(), DiagID)
+ << FunctionType::getNameForCallConv(CC);
+ }
+ }
} else {
// C++11 [replacement.functions]p3:
// The program's definitions shall not be specified as inline.
@@ -7749,12 +7889,12 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// This rule is not present in C++1y, so we produce a backwards
// compatibility warning whenever it happens in C++11.
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
- if (!getLangOpts().CPlusPlus1y && MD && MD->isConstexpr() &&
+ if (!getLangOpts().CPlusPlus14 && MD && MD->isConstexpr() &&
!MD->isStatic() && !isa<CXXConstructorDecl>(MD) &&
(MD->getTypeQualifiers() & Qualifiers::Const) == 0) {
CXXMethodDecl *OldMD = nullptr;
if (OldDecl)
- OldMD = dyn_cast<CXXMethodDecl>(OldDecl->getAsFunction());
+ OldMD = dyn_cast_or_null<CXXMethodDecl>(OldDecl->getAsFunction());
if (!OldMD || !OldMD->isStatic()) {
const FunctionProtoType *FPT =
MD->getType()->castAs<FunctionProtoType>();
@@ -7771,7 +7911,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
.IgnoreParens().getAs<FunctionTypeLoc>())
AddConstLoc = getLocForEndOfToken(FTL.getRParenLoc());
- Diag(MD->getLocation(), diag::warn_cxx1y_compat_constexpr_not_const)
+ Diag(MD->getLocation(), diag::warn_cxx14_compat_constexpr_not_const)
<< FixItHint::CreateInsertion(AddConstLoc, " const");
}
}
@@ -7838,6 +7978,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
}
// Semantic checking for this function declaration (in isolation).
+
if (getLangOpts().CPlusPlus) {
// C++-specific checks.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(NewFD)) {
@@ -7918,7 +8059,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// the function returns a UDT (class, struct, or union type) that is not C
// compatible, and if it does, warn the user.
// But, issue any diagnostic on the first declaration only.
- if (NewFD->isExternC() && Previous.empty()) {
+ if (Previous.empty() && NewFD->isExternC()) {
QualType R = NewFD->getReturnType();
if (R->isIncompleteType() && !R->isVoidType())
Diag(NewFD->getLocation(), diag::warn_return_value_udt_incomplete)
@@ -8120,6 +8261,8 @@ namespace {
bool isPODType;
bool isReferenceType;
+ bool isInitList;
+ llvm::SmallVector<unsigned, 4> InitFieldIndex;
public:
typedef EvaluatedExprVisitor<SelfReferenceChecker> Inherited;
@@ -8128,6 +8271,7 @@ namespace {
isPODType = false;
isRecordType = false;
isReferenceType = false;
+ isInitList = false;
if (ValueDecl *VD = dyn_cast<ValueDecl>(OrigDecl)) {
isPODType = VD->getType().isPODType(S.Context);
isRecordType = VD->getType()->isRecordType();
@@ -8135,25 +8279,122 @@ namespace {
}
}
+ // For most expressions, just call the visitor. For initializer lists,
+ // track the index of the field being initialized since fields are
+ // initialized in order, allowing use of previously initialized fields.
+ void CheckExpr(Expr *E) {
+ InitListExpr *InitList = dyn_cast<InitListExpr>(E);
+ if (!InitList) {
+ Visit(E);
+ return;
+ }
+
+ // Track and increment the index here.
+ isInitList = true;
+ InitFieldIndex.push_back(0);
+ for (auto Child : InitList->children()) {
+ CheckExpr(cast<Expr>(Child));
+ ++InitFieldIndex.back();
+ }
+ InitFieldIndex.pop_back();
+ }
+
+ // Returns true if MemberExpr is checked and no further checking is needed.
+ // Returns false if additional checking is required.
+ bool CheckInitListMemberExpr(MemberExpr *E, bool CheckReference) {
+ llvm::SmallVector<FieldDecl*, 4> Fields;
+ Expr *Base = E;
+ bool ReferenceField = false;
+
+ // Get the field members used.
+ while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
+ FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
+ if (!FD)
+ return false;
+ Fields.push_back(FD);
+ if (FD->getType()->isReferenceType())
+ ReferenceField = true;
+ Base = ME->getBase()->IgnoreParenImpCasts();
+ }
+
+ // Keep checking only if the base Decl is the same.
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base);
+ if (!DRE || DRE->getDecl() != OrigDecl)
+ return false;
+
+ // A reference field can be bound to an uninitialized field.
+ if (CheckReference && !ReferenceField)
+ return true;
+
+ // Convert FieldDecls to their index number.
+ llvm::SmallVector<unsigned, 4> UsedFieldIndex;
+ for (auto I = Fields.rbegin(), E = Fields.rend(); I != E; ++I) {
+ UsedFieldIndex.push_back((*I)->getFieldIndex());
+ }
+
+ // See if a warning is needed by checking the first difference in index
+ // numbers. If the field being used has a lower index than the field being
+ // initialized, then the use is safe.
+ for (auto UsedIter = UsedFieldIndex.begin(),
+ UsedEnd = UsedFieldIndex.end(),
+ OrigIter = InitFieldIndex.begin(),
+ OrigEnd = InitFieldIndex.end();
+ UsedIter != UsedEnd && OrigIter != OrigEnd; ++UsedIter, ++OrigIter) {
+ if (*UsedIter < *OrigIter)
+ return true;
+ if (*UsedIter > *OrigIter)
+ break;
+ }
+
+ // TODO: Add a different warning which will print the field names.
+ HandleDeclRefExpr(DRE);
+ return true;
+ }
+
// For most expressions, the cast is directly above the DeclRefExpr.
// For conditional operators, the cast can be outside the conditional
// operator if both expressions are DeclRefExpr's.
void HandleValue(Expr *E) {
- if (isReferenceType)
- return;
- E = E->IgnoreParenImpCasts();
+ E = E->IgnoreParens();
if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(E)) {
HandleDeclRefExpr(DRE);
return;
}
if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ Visit(CO->getCond());
HandleValue(CO->getTrueExpr());
HandleValue(CO->getFalseExpr());
return;
}
+ if (BinaryConditionalOperator *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ Visit(BCO->getCond());
+ HandleValue(BCO->getFalseExpr());
+ return;
+ }
+
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ HandleValue(OVE->getSourceExpr());
+ return;
+ }
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma) {
+ Visit(BO->getLHS());
+ HandleValue(BO->getRHS());
+ return;
+ }
+ }
+
if (isa<MemberExpr>(E)) {
+ if (isInitList) {
+ if (CheckInitListMemberExpr(cast<MemberExpr>(E),
+ false /*CheckReference*/))
+ return;
+ }
+
Expr *Base = E->IgnoreParenImpCasts();
while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
// Check for static member variables and don't warn on them.
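
The InitFieldIndex / UsedFieldIndex comparison above is lexicographic on field-index paths; a standalone sketch of the rule it encodes:

    #include <cstddef>
    #include <vector>

    // A use inside an initializer list is safe when the used field's index path
    // compares lexicographically less than the path of the field being
    // initialized (fields are initialized in order); an equal path or a prefix
    // means the field is not initialized yet, so a warning is emitted.
    static bool useIsSafe(const std::vector<unsigned> &Used,
                          const std::vector<unsigned> &BeingInitialized) {
      for (std::size_t i = 0; i < Used.size() && i < BeingInitialized.size(); ++i) {
        if (Used[i] < BeingInitialized[i])
          return true;   // earlier field: already initialized
        if (Used[i] > BeingInitialized[i])
          return false;  // later field: not yet initialized
      }
      return false;      // same field or a prefix: treated as a self-reference
    }
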
@@ -8165,24 +8406,32 @@ namespace {
HandleDeclRefExpr(DRE);
return;
}
+
+ Visit(E);
}
- // Reference types are handled here since all uses of references are
- // bad, not just r-value uses.
+ // Reference types not handled in HandleValue are handled here since all
+ // uses of references are bad, not just r-value uses.
void VisitDeclRefExpr(DeclRefExpr *E) {
if (isReferenceType)
HandleDeclRefExpr(E);
}
void VisitImplicitCastExpr(ImplicitCastExpr *E) {
- if (E->getCastKind() == CK_LValueToRValue ||
- (isRecordType && E->getCastKind() == CK_NoOp))
+ if (E->getCastKind() == CK_LValueToRValue) {
HandleValue(E->getSubExpr());
+ return;
+ }
Inherited::VisitImplicitCastExpr(E);
}
void VisitMemberExpr(MemberExpr *E) {
+ if (isInitList) {
+ if (CheckInitListMemberExpr(E, true /*CheckReference*/))
+ return;
+ }
+
// Don't warn on arrays since they can be treated as pointers.
if (E->getType()->canDecayToPointerType()) return;
@@ -8209,11 +8458,14 @@ namespace {
}
void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
- if (E->getNumArgs() > 0)
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getArg(0)))
- HandleDeclRefExpr(DRE);
+ Expr *Callee = E->getCallee();
- Inherited::VisitCXXOperatorCallExpr(E);
+ if (isa<UnresolvedLookupExpr>(Callee))
+ return Inherited::VisitCXXOperatorCallExpr(E);
+
+ Visit(Callee);
+ for (auto Arg: E->arguments())
+ HandleValue(Arg->IgnoreParenImpCasts());
}
void VisitUnaryOperator(UnaryOperator *E) {
@@ -8224,11 +8476,65 @@ namespace {
HandleValue(E->getSubExpr());
return;
}
+
+ if (E->isIncrementDecrementOp()) {
+ HandleValue(E->getSubExpr());
+ return;
+ }
+
Inherited::VisitUnaryOperator(E);
}
void VisitObjCMessageExpr(ObjCMessageExpr *E) { return; }
+ void VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (E->getConstructor()->isCopyConstructor()) {
+ Expr *ArgExpr = E->getArg(0);
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(ArgExpr))
+ if (ILE->getNumInits() == 1)
+ ArgExpr = ILE->getInit(0);
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
+ if (ICE->getCastKind() == CK_NoOp)
+ ArgExpr = ICE->getSubExpr();
+ HandleValue(ArgExpr);
+ return;
+ }
+ Inherited::VisitCXXConstructExpr(E);
+ }
+
+ void VisitCallExpr(CallExpr *E) {
+ // Treat std::move as a use.
+ if (E->getNumArgs() == 1) {
+ if (FunctionDecl *FD = E->getDirectCallee()) {
+ if (FD->isInStdNamespace() && FD->getIdentifier() &&
+ FD->getIdentifier()->isStr("move")) {
+ HandleValue(E->getArg(0));
+ return;
+ }
+ }
+ }
+
+ Inherited::VisitCallExpr(E);
+ }
+
+ void VisitBinaryOperator(BinaryOperator *E) {
+ if (E->isCompoundAssignmentOp()) {
+ HandleValue(E->getLHS());
+ Visit(E->getRHS());
+ return;
+ }
+
+ Inherited::VisitBinaryOperator(E);
+ }
+
+ // A custom visitor for BinaryConditionalOperator is needed because the
+ // regular visitor would check the condition and true expression separately
+ // but both point to the same place giving duplicate diagnostics.
+ void VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ Visit(E->getCond());
+ Visit(E->getFalseExpr());
+ }
+
void HandleDeclRefExpr(DeclRefExpr *DRE) {
Decl* ReferenceDecl = DRE->getDecl();
if (OrigDecl != ReferenceDecl) return;
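
VisitCallExpr above makes the self-initialization checker look through std::move; a minimal example of the pattern it now catches:

    #include <string>
    #include <utility>

    static void self_move_init() {
      // 's' is read (via std::move) while initializing itself, so the
      // uninitialized-use warning now fires here as well.
      std::string s = std::move(s);
      (void)s;
    }
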
@@ -8268,7 +8574,7 @@ namespace {
if (DRE->getDecl() == OrigDecl)
return;
- SelfReferenceChecker(S, OrigDecl).Visit(E);
+ SelfReferenceChecker(S, OrigDecl).CheckExpr(E);
}
}
@@ -8279,8 +8585,10 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
bool DirectInit, bool TypeMayContainAuto) {
// If there is no declaration, there was an error parsing it. Just ignore
// the initializer.
- if (!RealDecl || RealDecl->isInvalidDecl())
+ if (!RealDecl || RealDecl->isInvalidDecl()) {
+ CorrectDelayedTyposInExpr(Init);
return;
+ }
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(RealDecl)) {
// With declarators parsed the way they are, the parser cannot
@@ -8511,6 +8819,21 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
Args = MultiExprArg(CXXDirectInit->getExprs(),
CXXDirectInit->getNumExprs());
+ // Try to correct any TypoExprs in the initialization arguments.
+ for (size_t Idx = 0; Idx < Args.size(); ++Idx) {
+ ExprResult Res =
+ CorrectDelayedTyposInExpr(Args[Idx], [this, Entity, Kind](Expr *E) {
+ InitializationSequence Init(*this, Entity, Kind, MultiExprArg(E));
+ return Init.Failed() ? ExprError() : E;
+ });
+ if (Res.isInvalid()) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+ if (Res.get() != Args[Idx])
+ Args[Idx] = Res.get();
+ }
+
InitializationSequence InitSeq(*this, Entity, Kind, Args);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT);
if (Result.isInvalid()) {
@@ -9124,15 +9447,15 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (var->isThisDeclarationADefinition() &&
ActiveTemplateInstantiations.empty()) {
PragmaStack<StringLiteral *> *Stack = nullptr;
- int SectionFlags = PSF_Implicit | PSF_Read;
+ int SectionFlags = ASTContext::PSF_Implicit | ASTContext::PSF_Read;
if (var->getType().isConstQualified())
Stack = &ConstSegStack;
else if (!var->getInit()) {
Stack = &BSSSegStack;
- SectionFlags |= PSF_Write;
+ SectionFlags |= ASTContext::PSF_Write;
} else {
Stack = &DataSegStack;
- SectionFlags |= PSF_Write;
+ SectionFlags |= ASTContext::PSF_Write;
}
if (!var->hasAttr<SectionAttr>() && Stack->CurrentValue)
var->addAttr(
@@ -9244,7 +9567,7 @@ Sema::FinalizeDeclaration(Decl *ThisDecl) {
// Static locals inherit dll attributes from their function.
if (VD->isStaticLocal()) {
if (FunctionDecl *FD =
- dyn_cast<FunctionDecl>(VD->getParentFunctionOrMethod())) {
+ dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod())) {
if (Attr *A = getDLLAttr(FD)) {
auto *NewAttr = cast<InheritableAttr>(A->clone(getASTContext()));
NewAttr->setInherited(true);
@@ -9253,8 +9576,11 @@ Sema::FinalizeDeclaration(Decl *ThisDecl) {
}
}
+ // Grab the dllimport or dllexport attribute off of the VarDecl.
+ const InheritableAttr *DLLAttr = getDLLAttr(VD);
+
// Imported static data members cannot be defined out-of-line.
- if (const DLLImportAttr *IA = VD->getAttr<DLLImportAttr>()) {
+ if (const auto *IA = dyn_cast_or_null<DLLImportAttr>(DLLAttr)) {
if (VD->isStaticDataMember() && VD->isOutOfLine() &&
VD->isThisDeclarationADefinition()) {
// We allow definitions of dllimport class template static data members
@@ -9275,6 +9601,14 @@ Sema::FinalizeDeclaration(Decl *ThisDecl) {
}
}
+ // dllimport/dllexport variables cannot be thread local, their TLS index
+ // isn't exported with the variable.
+ if (DLLAttr && VD->getTLSKind()) {
+ Diag(VD->getLocation(), diag::err_attribute_dll_thread_local) << VD
+ << DLLAttr;
+ VD->setInvalidDecl();
+ }
+
if (UsedAttr *Attr = VD->getAttr<UsedAttr>()) {
if (!Attr->isInherited() && !VD->isThisDeclarationADefinition()) {
Diag(Attr->getLocation(), diag::warn_attribute_ignored) << Attr;
@@ -9466,12 +9800,12 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
// Verify C99 6.7.5.3p2: The only SCS allowed is 'register'.
// C++03 [dcl.stc]p2 also permits 'auto'.
- VarDecl::StorageClass StorageClass = SC_None;
+ StorageClass SC = SC_None;
if (DS.getStorageClassSpec() == DeclSpec::SCS_register) {
- StorageClass = SC_Register;
+ SC = SC_Register;
} else if (getLangOpts().CPlusPlus &&
DS.getStorageClassSpec() == DeclSpec::SCS_auto) {
- StorageClass = SC_Auto;
+ SC = SC_Auto;
} else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified) {
Diag(DS.getStorageClassSpecLoc(),
diag::err_invalid_storage_class_in_func_decl);
@@ -9545,7 +9879,7 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
D.getLocStart(),
D.getIdentifierLoc(), II,
parmDeclType, TInfo,
- StorageClass);
+ SC);
if (D.isInvalidType())
New->setInvalidDecl();
@@ -9637,7 +9971,7 @@ void Sema::DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Param,
ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
- VarDecl::StorageClass StorageClass) {
+ StorageClass SC) {
// In ARC, infer a lifetime qualifier for appropriate parameter types.
if (getLangOpts().ObjCAutoRefCount &&
T.getObjCLifetime() == Qualifiers::OCL_None &&
@@ -9663,8 +9997,7 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
ParmVarDecl *New = ParmVarDecl::Create(Context, DC, StartLoc, NameLoc, Name,
Context.getAdjustedParameterType(T),
- TSInfo,
- StorageClass, nullptr);
+ TSInfo, SC, nullptr);
// Parameters can not be abstract class types.
// For record types, this is done by the AbstractClassUsageDiagnoser once
@@ -9850,6 +10183,7 @@ static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
// Add the captures to the LSI so they can be noted as already
// captured within tryCaptureVar.
+ auto I = LambdaClass->field_begin();
for (const auto &C : LambdaClass->captures()) {
if (C.capturesVariable()) {
VarDecl *VD = C.getCapturedVar();
@@ -9858,7 +10192,7 @@ static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
QualType CaptureType = VD->getType();
const bool ByRef = C.getCaptureKind() == LCK_ByRef;
LSI->addCapture(VD, /*IsBlock*/false, ByRef,
- /*RefersToEnclosingLocal*/true, C.getLocation(),
+ /*RefersToEnclosingVariableOrCapture*/true, C.getLocation(),
/*EllipsisLoc*/C.isPackExpansion()
? C.getEllipsisLoc() : SourceLocation(),
CaptureType, /*Expr*/ nullptr);
@@ -9866,7 +10200,10 @@ static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
} else if (C.capturesThis()) {
LSI->addThisCapture(/*Nested*/ false, C.getLocation(),
S.getCurrentThisType(), /*Expr*/ nullptr);
+ } else {
+ LSI->addVLATypeCapture(C.getLocation(), I->getType());
}
+ ++I;
}
}
@@ -10107,7 +10444,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (FD) {
FD->setBody(Body);
- if (getLangOpts().CPlusPlus1y && !FD->isInvalidDecl() && Body &&
+ if (getLangOpts().CPlusPlus14 && !FD->isInvalidDecl() && Body &&
!FD->isDependentContext() && FD->getReturnType()->isUndeducedType()) {
// If the function has a deduced result type but contains no 'return'
// statements, the result type as written must be exactly 'auto', and
@@ -10118,8 +10455,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
FD->setInvalidDecl();
} else {
// Substitute 'void' for the 'auto' in the type.
- TypeLoc ResultType = FD->getTypeSourceInfo()->getTypeLoc().
- IgnoreParens().castAs<FunctionProtoTypeLoc>().getReturnLoc();
+ TypeLoc ResultType = getReturnTypeLoc(FD);
Context.adjustDeducedFunctionResultType(
FD, SubstAutoType(ResultType.getType(), Context.VoidTy));
}
@@ -10154,9 +10490,11 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
DiagnoseSizeOfParametersAndReturnValue(FD->param_begin(), FD->param_end(),
FD->getReturnType(), FD);
- // If this is a constructor, we need a vtable.
+ // If this is a structor, we need a vtable.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(FD))
MarkVTableUsed(FD->getLocation(), Constructor->getParent());
+ else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(FD))
+ MarkVTableUsed(FD->getLocation(), Destructor->getParent());
// Try to apply the named return value optimization. We have to check
// if we can do this here because lambdas keep return statements around
@@ -10265,7 +10603,19 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
!CheckConstexprFunctionBody(FD, Body)))
FD->setInvalidDecl();
- assert(ExprCleanupObjects.empty() && "Leftover temporaries in function");
+ if (FD && FD->hasAttr<NakedAttr>()) {
+ for (const Stmt *S : Body->children()) {
+ if (!isa<AsmStmt>(S) && !isa<NullStmt>(S)) {
+ Diag(S->getLocStart(), diag::err_non_asm_stmt_in_naked_function);
+ Diag(FD->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
+ FD->setInvalidDecl();
+ break;
+ }
+ }
+ }
+
+ assert(ExprCleanupObjects.size() == ExprEvalContexts.back().NumCleanupObjects
+ && "Leftover temporaries in function");
assert(!ExprNeedsCleanups && "Unaccounted cleanups in function");
assert(MaybeODRUseExprs.empty() &&
"Leftover expressions for odr-use checking");
@@ -10329,10 +10679,10 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
// function declaration is going to be treated as an error.
if (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error) {
TypoCorrection Corrected;
- DeclFilterCCC<FunctionDecl> Validator;
- if (S && (Corrected = CorrectTypo(DeclarationNameInfo(&II, Loc),
- LookupOrdinaryName, S, nullptr, Validator,
- CTK_NonError)))
+ if (S &&
+ (Corrected = CorrectTypo(
+ DeclarationNameInfo(&II, Loc), LookupOrdinaryName, S, nullptr,
+ llvm::make_unique<DeclFilterCCC<FunctionDecl>>(), CTK_NonError)))
diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
/*ErrorRecovery*/false);
}
@@ -10360,6 +10710,7 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
/*RefQualifierLoc=*/NoLoc,
/*ConstQualifierLoc=*/NoLoc,
/*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc,
/*MutableLoc=*/NoLoc,
EST_None,
/*ESpecLoc=*/NoLoc,
@@ -10367,6 +10718,7 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
/*ExceptionRanges=*/nullptr,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
+ /*ExceptionSpecTokens=*/nullptr,
Loc, Loc, D),
DS.getAttributes(),
SourceLocation());
@@ -11262,9 +11614,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
} else {
// If the type is currently being defined, complain
// about a nested redefinition.
- const TagType *Tag
- = cast<TagType>(Context.getTagDeclType(PrevTagDecl));
- if (Tag->isBeingDefined()) {
+ auto *TD = Context.getTagDeclType(PrevTagDecl)->getAsTagDecl();
+ if (TD->isBeingDefined()) {
Diag(NameLoc, diag::err_nested_redefinition) << Name;
Diag(PrevTagDecl->getLocation(),
diag::note_previous_definition);
@@ -11454,7 +11805,7 @@ CreateNewDecl:
// CheckMemberSpecialization, below.
if (!isExplicitSpecialization &&
(TUK == TUK_Definition || TUK == TUK_Declaration) &&
- diagnoseQualifiedDeclaration(SS, DC, OrigName, NameLoc))
+ diagnoseQualifiedDeclaration(SS, DC, OrigName, Loc))
Invalid = true;
New->setQualifierInfo(SS.getWithLocInContext(Context));
@@ -12430,8 +12781,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
continue;
}
// Okay, we have a legal flexible array member at the end of the struct.
- if (Record)
- Record->setHasFlexibleArrayMember(true);
+ Record->setHasFlexibleArrayMember(true);
} else if (!FDTy->isDependentType() &&
RequireCompleteType(FD->getLocation(), FD->getType(),
diag::err_field_incomplete)) {
@@ -12440,11 +12790,11 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
EnclosingDecl->setInvalidDecl();
continue;
} else if (const RecordType *FDTTy = FDTy->getAs<RecordType>()) {
- if (FDTTy->getDecl()->hasFlexibleArrayMember()) {
- // If this is a member of a union, then entire union becomes "flexible".
- if (Record && Record->isUnion()) {
- Record->setHasFlexibleArrayMember(true);
- } else {
+ if (Record && FDTTy->getDecl()->hasFlexibleArrayMember()) {
+ // A type which contains a flexible array member is considered to be a
+ // flexible array member.
+ Record->setHasFlexibleArrayMember(true);
+ if (!Record->isUnion()) {
// If this is a struct/class and this is not the last element, reject
// it. Note that GCC supports variable sized arrays in the middle of
// structures.
@@ -12456,8 +12806,6 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// other structs as an extension.
Diag(FD->getLocation(), diag::ext_flexible_array_in_struct)
<< FD->getDeclName();
- if (Record)
- Record->setHasFlexibleArrayMember(true);
}
}
}
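
The reworked branch marks a record as having a flexible array member whenever one of its fields' types does, not only for unions; an illustrative (extension) case, with the usual caveat that the exact diagnostics depend on the field's position:

    struct tail {
      int count;
      int data[];        // flexible array member
    };

    struct wrapper {
      int id;
      struct tail t;     // wrapper now also counts as having a flexible array
    };                   // member; accepted by clang/GCC as an extension
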
@@ -13171,6 +13519,49 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
}
}
+bool
+Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
+ bool AllowMask) const {
+ FlagEnumAttr *FEAttr = ED->getAttr<FlagEnumAttr>();
+ assert(FEAttr && "looking for value in non-flag enum");
+
+ llvm::APInt FlagMask = ~FEAttr->getFlagBits();
+ unsigned Width = FlagMask.getBitWidth();
+
+ // We will try a zero-extended value for the regular check first.
+ llvm::APInt ExtVal = Val.zextOrSelf(Width);
+
+ // A value is in a flag enum if either its bits are a subset of the enum's
+ // flag bits (the first condition) or we are allowing masks and the same is
+ // true of its complement (the second condition). When masks are allowed, we
+ // allow the common idiom of ~(enum1 | enum2) to be a valid enum value.
+ //
+ // While it's true that any value could be used as a mask, the assumption is
+ // that a mask will have all of the insignificant bits set. Anything else is
+ // likely a logic error.
+ if (!(FlagMask & ExtVal))
+ return true;
+
+ if (AllowMask) {
+ // Try a one-extended value instead. This can happen if the enum is wider
+ // than the constant used, in C with extensions to allow for wider enums.
+ // The mask will still have the correct behaviour, so we give the user the
+ // benefit of the doubt.
+ //
+ // FIXME: This heuristic can cause weird results if the enum was extended
+ // to a larger type and is signed, because then bit-masks of smaller types
+ // that get extended will fall out of range (e.g. ~0x1u). We currently don't
+ // detect that case and will get a false positive for it. In most cases,
+ // though, it can be fixed by making it a signed type (e.g. ~0x1), so it may
+ // be fine just to accept this as a warning.
+ ExtVal |= llvm::APInt::getHighBitsSet(Width, Width - Val.getBitWidth());
+ if (!(FlagMask & ~ExtVal))
+ return true;
+ }
+
+ return false;
+}
+
void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDeclX,
ArrayRef<Decl *> Elements,
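
IsValueInFlagEnum above reduces to simple bit arithmetic on the cached FlagBits; a standalone sketch of the two acceptance conditions (width-extension details omitted):

    #include <cstdint>

    // FlagBits is the OR of all single-bit enumerators of the flag enum.
    static bool isValueInFlagEnum(uint64_t FlagBits, uint64_t Val, bool AllowMask) {
      uint64_t FlagMask = ~FlagBits;          // bits no enumerator ever sets
      if ((FlagMask & Val) == 0)
        return true;                          // Val only uses declared flag bits
      if (AllowMask && (FlagMask & ~Val) == 0)
        return true;                          // e.g. ~(A | B) used as a mask
      return false;
    }
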
@@ -13256,10 +13647,8 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
BestPromotionType = Context.getPromotedIntegerType(BestType);
else
BestPromotionType = BestType;
- // We don't need to set BestWidth, because BestType is going to be the type
- // of the enumerators, but we do anyway because otherwise some compilers
- // warn that it might be used uninitialized.
- BestWidth = CharWidth;
+
+ BestWidth = Context.getIntWidth(BestType);
}
else if (NumNegativeBits) {
// If there is a negative value, figure out the smallest integer type (of
@@ -13324,10 +13713,15 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
}
}
+ FlagEnumAttr *FEAttr = Enum->getAttr<FlagEnumAttr>();
+ if (FEAttr)
+ FEAttr->getFlagBits() = llvm::APInt(BestWidth, 0);
+
// Loop over all of the enumerator constants, changing their types to match
- // the type of the enum if needed.
- for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
- EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Elements[i]);
+ // the type of the enum if needed. If we have a flag type, we also prepare the
+ // FlagBits cache.
+ for (auto *D : Elements) {
+ auto *ECD = cast_or_null<EnumConstantDecl>(D);
if (!ECD) continue; // Already issued a diagnostic.
// Standard C says the enumerators have int type, but we allow, as an
@@ -13357,7 +13751,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
// enum-specifier, each enumerator has the type of its
// enumeration.
ECD->setType(EnumType);
- continue;
+ goto flagbits;
} else {
NewTy = BestType;
NewWidth = BestWidth;
@@ -13384,8 +13778,32 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
ECD->setType(EnumType);
else
ECD->setType(NewTy);
+
+flagbits:
+ // Check to see if we have a constant with exactly one bit set. Note that x
+ // & (x - 1) will be nonzero if and only if x has more than one bit set.
+ if (FEAttr) {
+ llvm::APInt ExtVal = InitVal.zextOrSelf(BestWidth);
+ if (ExtVal != 0 && !(ExtVal & (ExtVal - 1))) {
+ FEAttr->getFlagBits() |= ExtVal;
+ }
+ }
}
+ if (FEAttr) {
+ for (Decl *D : Elements) {
+ EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(D);
+ if (!ECD) continue; // Already issued a diagnostic.
+
+ llvm::APSInt InitVal = ECD->getInitVal();
+ if (InitVal != 0 && !IsValueInFlagEnum(Enum, InitVal, true))
+ Diag(ECD->getLocation(), diag::warn_flag_enum_constant_out_of_range)
+ << ECD << Enum;
+ }
+ }
+
+
+
Enum->completeDefinition(BestType, BestPromotionType,
NumPositiveBits, NumNegativeBits);
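A hypothetical user-level example of the resulting check (names and diagnostic wording are illustrative, not taken from the patch): only single-bit enumerators contribute to the flag mask, so constants built from other bits get flagged.

    // Assuming Clang's flag_enum attribute spelling; illustrative only.
    enum __attribute__((flag_enum)) Permissions {
      Read    = 0x1,
      Write   = 0x2,
      Execute = 0x4,
      ReadWrite = Read | Write, // fine: combination of declared single-bit flags
      Group     = 0x18          // would warn: bits 0x8/0x10 match no single-bit flag
    };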
@@ -13455,6 +13873,9 @@ DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc,
if (Mod->getTopLevelModuleName() == getLangOpts().CurrentModule)
Diag(ImportLoc, diag::err_module_self_import)
<< Mod->getFullModuleName() << getLangOpts().CurrentModule;
+ else if (Mod->getTopLevelModuleName() == getLangOpts().ImplementationOfModule)
+ Diag(ImportLoc, diag::err_module_import_in_implementation)
+ << Mod->getFullModuleName() << getLangOpts().ImplementationOfModule;
SmallVector<SourceLocation, 2> IdentifierLocs;
Module *ModCheck = Mod;
@@ -13575,5 +13996,6 @@ AvailabilityResult Sema::getCurContextAvailability() const {
dyn_cast<ObjCImplementationDecl>(D)) {
D = ID->getClassInterface();
}
- return D->getAvailability();
+ // Recover from user error.
+ return D ? D->getAvailability() : AR_Available;
}
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
index 61683cd87579..17a849ea7af0 100644
--- a/lib/Sema/SemaDeclAttr.cpp
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -29,6 +29,7 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/MathExtras.h"
using namespace clang;
using namespace sema;
@@ -88,21 +89,39 @@ static QualType getFunctionOrMethodParamType(const Decl *D, unsigned Idx) {
return cast<ObjCMethodDecl>(D)->parameters()[Idx]->getType();
}
+static SourceRange getFunctionOrMethodParamRange(const Decl *D, unsigned Idx) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ return FD->getParamDecl(Idx)->getSourceRange();
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->parameters()[Idx]->getSourceRange();
+ if (const auto *BD = dyn_cast<BlockDecl>(D))
+ return BD->getParamDecl(Idx)->getSourceRange();
+ return SourceRange();
+}
+
static QualType getFunctionOrMethodResultType(const Decl *D) {
if (const FunctionType *FnTy = D->getFunctionType())
return cast<FunctionType>(FnTy)->getReturnType();
return cast<ObjCMethodDecl>(D)->getReturnType();
}
+static SourceRange getFunctionOrMethodResultSourceRange(const Decl *D) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ return FD->getReturnTypeSourceRange();
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getReturnTypeSourceRange();
+ return SourceRange();
+}
+
static bool isFunctionOrMethodVariadic(const Decl *D) {
if (const FunctionType *FnTy = D->getFunctionType()) {
const FunctionProtoType *proto = cast<FunctionProtoType>(FnTy);
return proto->isVariadic();
- } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
- return BD->isVariadic();
- else {
- return cast<ObjCMethodDecl>(D)->isVariadic();
}
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ return BD->isVariadic();
+
+ return cast<ObjCMethodDecl>(D)->isVariadic();
}
static bool isInstanceMethod(const Decl *D) {
@@ -148,30 +167,43 @@ static unsigned getNumAttributeArgs(const AttributeList &Attr) {
return Attr.getNumArgs() + Attr.hasParsedType();
}
-/// \brief Check if the attribute has exactly as many args as Num. May
-/// output an error.
-static bool checkAttributeNumArgs(Sema &S, const AttributeList &Attr,
- unsigned Num) {
- if (getNumAttributeArgs(Attr) != Num) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
- << Attr.getName() << Num;
+template <typename Compare>
+static bool checkAttributeNumArgsImpl(Sema &S, const AttributeList &Attr,
+ unsigned Num, unsigned Diag,
+ Compare Comp) {
+ if (Comp(getNumAttributeArgs(Attr), Num)) {
+ S.Diag(Attr.getLoc(), Diag) << Attr.getName() << Num;
return false;
}
return true;
}
+/// \brief Check if the attribute has exactly as many args as Num. May
+/// output an error.
+static bool checkAttributeNumArgs(Sema &S, const AttributeList &Attr,
+ unsigned Num) {
+ return checkAttributeNumArgsImpl(S, Attr, Num,
+ diag::err_attribute_wrong_number_arguments,
+ std::not_equal_to<unsigned>());
+}
+
/// \brief Check if the attribute has at least as many args as Num. May
/// output an error.
static bool checkAttributeAtLeastNumArgs(Sema &S, const AttributeList &Attr,
unsigned Num) {
- if (getNumAttributeArgs(Attr) < Num) {
- S.Diag(Attr.getLoc(), diag::err_attribute_too_few_arguments)
- << Attr.getName() << Num;
- return false;
- }
+ return checkAttributeNumArgsImpl(S, Attr, Num,
+ diag::err_attribute_too_few_arguments,
+ std::less<unsigned>());
+}
- return true;
+/// \brief Check if the attribute has at most as many args as Num. May
+/// output an error.
+static bool checkAttributeAtMostNumArgs(Sema &S, const AttributeList &Attr,
+ unsigned Num) {
+ return checkAttributeNumArgsImpl(S, Attr, Num,
+ diag::err_attribute_too_many_arguments,
+ std::greater<unsigned>());
}
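A stand-alone sketch of the comparator-driven pattern above, with Sema and AttributeList stripped out and all names invented, showing how one template plus std::not_equal_to / std::less / std::greater replaces three hand-written checks:

    #include <cstdio>
    #include <functional>

    template <typename Compare>
    static bool checkCountImpl(unsigned Actual, unsigned Num, const char *Msg,
                               Compare Comp) {
      if (Comp(Actual, Num)) { // e.g. not_equal_to, less, greater
        std::printf("error: %s %u argument(s)\n", Msg, Num);
        return false;
      }
      return true;
    }

    static bool checkExactly(unsigned Actual, unsigned Num) {
      return checkCountImpl(Actual, Num, "expected exactly",
                            std::not_equal_to<unsigned>());
    }
    static bool checkAtLeast(unsigned Actual, unsigned Num) {
      return checkCountImpl(Actual, Num, "expected at least",
                            std::less<unsigned>());
    }
    static bool checkAtMost(unsigned Actual, unsigned Num) {
      return checkCountImpl(Actual, Num, "expected at most",
                            std::greater<unsigned>());
    }

    int main() {
      checkExactly(2, 1); // diagnoses: expected exactly 1
      checkAtLeast(0, 1); // diagnoses: expected at least 1
      checkAtMost(3, 2);  // diagnoses: expected at most 2
    }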
/// \brief If Expr is a valid integer constant, get the value of the integer
@@ -192,6 +224,13 @@ static bool checkUInt32Argument(Sema &S, const AttributeList &Attr,
<< Expr->getSourceRange();
return false;
}
+
+ if (!I.isIntN(32)) {
+ S.Diag(Expr->getExprLoc(), diag::err_ice_too_large)
+ << I.toString(10, false) << 32 << /* Unsigned */ 1;
+ return false;
+ }
+
Val = (uint32_t)I.getZExtValue();
return true;
}
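Illustrative user code for the new 32-bit range check (the attribute and values are only examples): a constant attribute argument that does not fit in 32 bits is now rejected instead of being silently truncated.

    __attribute__((constructor(65535))) void init_late();        // fine: fits in 32 bits
    __attribute__((constructor(4294967296ULL))) void overflow(); // now an error: too large for a 32-bit unsigned value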
@@ -528,10 +567,6 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
// Attribute Implementations
//===----------------------------------------------------------------------===//
-// FIXME: All this manual attribute parsing code is gross. At the
-// least add some helper functions to check most argument patterns (#
-// and types of args).
-
static void handlePtGuardedVarAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
if (!threadSafetyCheckIsPointer(S, D, Attr))
@@ -706,11 +741,9 @@ static void handleExclusiveTrylockFunctionAttr(Sema &S, Decl *D,
if (!checkTryLockFunAttrCommon(S, D, Attr, Args))
return;
- D->addAttr(::new (S.Context)
- ExclusiveTrylockFunctionAttr(Attr.getRange(), S.Context,
- Attr.getArgAsExpr(0),
- Args.data(), Args.size(),
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) ExclusiveTrylockFunctionAttr(
+ Attr.getRange(), S.Context, Attr.getArgAsExpr(0), Args.data(),
+ Args.size(), Attr.getAttributeSpellingListIndex()));
}
static void handleLockReturnedAttr(Sema &S, Decl *D,
@@ -828,8 +861,14 @@ static void handleCallableWhenAttr(Sema &S, Decl *D,
StringRef StateString;
SourceLocation Loc;
- if (!S.checkStringLiteralArgumentAttr(Attr, ArgIndex, StateString, &Loc))
- return;
+ if (Attr.isArgIdent(ArgIndex)) {
+ IdentifierLoc *Ident = Attr.getArgAsIdent(ArgIndex);
+ StateString = Ident->Ident->getName();
+ Loc = Ident->Loc;
+ } else {
+ if (!S.checkStringLiteralArgumentAttr(Attr, ArgIndex, StateString, &Loc))
+ return;
+ }
if (!CallableWhenAttr::ConvertStrToConsumedState(StateString,
CallableState)) {
@@ -849,8 +888,6 @@ static void handleCallableWhenAttr(Sema &S, Decl *D,
static void handleParamTypestateAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
- if (!checkAttributeNumArgs(S, Attr, 1)) return;
-
ParamTypestateAttr::ConsumedState ParamState;
if (Attr.isArgIdent(0)) {
@@ -889,8 +926,6 @@ static void handleParamTypestateAttr(Sema &S, Decl *D,
static void handleReturnTypestateAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
- if (!checkAttributeNumArgs(S, Attr, 1)) return;
-
ReturnTypestateAttr::ConsumedState ReturnState;
if (Attr.isArgIdent(0)) {
@@ -939,9 +974,6 @@ static void handleReturnTypestateAttr(Sema &S, Decl *D,
static void handleSetTypestateAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (!checkAttributeNumArgs(S, Attr, 1))
- return;
-
if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr))
return;
@@ -967,9 +999,6 @@ static void handleSetTypestateAttr(Sema &S, Decl *D, const AttributeList &Attr)
static void handleTestTypestateAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
- if (!checkAttributeNumArgs(S, Attr, 1))
- return;
-
if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr))
return;
@@ -1101,30 +1130,39 @@ static void handleIBOutletCollection(Sema &S, Decl *D,
Attr.getAttributeSpellingListIndex()));
}
-static void possibleTransparentUnionPointerType(QualType &T) {
- if (const RecordType *UT = T->getAsUnionType())
+bool Sema::isValidPointerAttrType(QualType T, bool RefOkay) {
+ if (RefOkay) {
+ if (T->isReferenceType())
+ return true;
+ } else {
+ T = T.getNonReferenceType();
+ }
+
+ // The nonnull attribute, and other similar attributes, can be applied to a
+ // transparent union that contains a pointer type.
+ if (const RecordType *UT = T->getAsUnionType()) {
if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
RecordDecl *UD = UT->getDecl();
for (const auto *I : UD->fields()) {
QualType QT = I->getType();
- if (QT->isAnyPointerType() || QT->isBlockPointerType()) {
- T = QT;
- return;
- }
+ if (QT->isAnyPointerType() || QT->isBlockPointerType())
+ return true;
}
}
+ }
+
+ return T->isAnyPointerType() || T->isBlockPointerType();
}
static bool attrNonNullArgCheck(Sema &S, QualType T, const AttributeList &Attr,
- SourceRange R, bool isReturnValue = false) {
- T = T.getNonReferenceType();
- possibleTransparentUnionPointerType(T);
-
- if (!T->isAnyPointerType() && !T->isBlockPointerType()) {
- S.Diag(Attr.getLoc(),
- isReturnValue ? diag::warn_attribute_return_pointers_only
- : diag::warn_attribute_pointers_only)
- << Attr.getName() << R;
+ SourceRange AttrParmRange,
+ SourceRange TypeRange,
+ bool isReturnValue = false) {
+ if (!S.isValidPointerAttrType(T)) {
+ S.Diag(Attr.getLoc(), isReturnValue
+ ? diag::warn_attribute_return_pointers_only
+ : diag::warn_attribute_pointers_only)
+ << Attr.getName() << AttrParmRange << TypeRange;
return false;
}
return true;
@@ -1132,46 +1170,45 @@ static bool attrNonNullArgCheck(Sema &S, QualType T, const AttributeList &Attr,
static void handleNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) {
SmallVector<unsigned, 8> NonNullArgs;
- for (unsigned i = 0; i < Attr.getNumArgs(); ++i) {
- Expr *Ex = Attr.getArgAsExpr(i);
+ for (unsigned I = 0; I < Attr.getNumArgs(); ++I) {
+ Expr *Ex = Attr.getArgAsExpr(I);
uint64_t Idx;
- if (!checkFunctionOrMethodParameterIndex(S, D, Attr, i + 1, Ex, Idx))
+ if (!checkFunctionOrMethodParameterIndex(S, D, Attr, I + 1, Ex, Idx))
return;
// Is the function argument a pointer type?
- // FIXME: Should also highlight argument in decl in the diagnostic.
- if (!attrNonNullArgCheck(S, getFunctionOrMethodParamType(D, Idx), Attr,
- Ex->getSourceRange()))
+ if (Idx < getFunctionOrMethodNumParams(D) &&
+ !attrNonNullArgCheck(S, getFunctionOrMethodParamType(D, Idx), Attr,
+ Ex->getSourceRange(),
+ getFunctionOrMethodParamRange(D, Idx)))
continue;
NonNullArgs.push_back(Idx);
}
// If no arguments were specified to __attribute__((nonnull)) then all pointer
- // arguments have a nonnull attribute.
- if (NonNullArgs.empty()) {
- for (unsigned i = 0, e = getFunctionOrMethodNumParams(D); i != e; ++i) {
- QualType T = getFunctionOrMethodParamType(D, i).getNonReferenceType();
- possibleTransparentUnionPointerType(T);
- if (T->isAnyPointerType() || T->isBlockPointerType())
- NonNullArgs.push_back(i);
+ // arguments have a nonnull attribute; warn if there aren't any. Skip this
+ // check if the attribute came from a macro expansion or a template
+ // instantiation.
+ if (NonNullArgs.empty() && Attr.getLoc().isFileID() &&
+ S.ActiveTemplateInstantiations.empty()) {
+ bool AnyPointers = isFunctionOrMethodVariadic(D);
+ for (unsigned I = 0, E = getFunctionOrMethodNumParams(D);
+ I != E && !AnyPointers; ++I) {
+ QualType T = getFunctionOrMethodParamType(D, I);
+ if (T->isDependentType() || S.isValidPointerAttrType(T))
+ AnyPointers = true;
}
- // No pointer arguments?
- if (NonNullArgs.empty()) {
- // Warn the trivial case only if attribute is not coming from a
- // macro instantiation.
- if (Attr.getLoc().isFileID())
- S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_no_pointers);
- return;
- }
+ if (!AnyPointers)
+ S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_no_pointers);
}
- unsigned *start = &NonNullArgs[0];
- unsigned size = NonNullArgs.size();
- llvm::array_pod_sort(start, start + size);
+ unsigned *Start = NonNullArgs.data();
+ unsigned Size = NonNullArgs.size();
+ llvm::array_pod_sort(Start, Start + Size);
D->addAttr(::new (S.Context)
- NonNullAttr(Attr.getRange(), S.Context, start, size,
+ NonNullAttr(Attr.getRange(), S.Context, Start, Size,
Attr.getAttributeSpellingListIndex()));
}
@@ -1188,7 +1225,8 @@ static void handleNonNullAttrParameter(Sema &S, ParmVarDecl *D,
}
// Is the argument a pointer type?
- if (!attrNonNullArgCheck(S, D->getType(), Attr, D->getSourceRange()))
+ if (!attrNonNullArgCheck(S, D->getType(), Attr, SourceRange(),
+ D->getSourceRange()))
return;
D->addAttr(::new (S.Context)
@@ -1199,7 +1237,8 @@ static void handleNonNullAttrParameter(Sema &S, ParmVarDecl *D,
static void handleReturnsNonNullAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
QualType ResultType = getFunctionOrMethodResultType(D);
- if (!attrNonNullArgCheck(S, ResultType, Attr, Attr.getRange(),
+ SourceRange SR = getFunctionOrMethodResultSourceRange(D);
+ if (!attrNonNullArgCheck(S, ResultType, Attr, SourceRange(), SR,
/* isReturnValue */ true))
return;
@@ -1208,6 +1247,65 @@ static void handleReturnsNonNullAttr(Sema &S, Decl *D,
Attr.getAttributeSpellingListIndex()));
}
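A hypothetical user-level illustration of the reworked nonnull checks (diagnostic text paraphrased): the argument-free form now warns only when no parameter could plausibly be a pointer, and returns_nonnull is validated against the return type with its source range highlighted.

    void copy(char *dst, const char *src) __attribute__((nonnull)); // fine: applies to both pointer parameters
    void scale(int factor) __attribute__((nonnull));                // warns: no pointer parameters at all
    char *make_name(void) __attribute__((returns_nonnull));         // fine: pointer return type
    int count(void) __attribute__((returns_nonnull));               // warns: return type is not a pointer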
+static void handleAssumeAlignedAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ Expr *E = Attr.getArgAsExpr(0),
+ *OE = Attr.getNumArgs() > 1 ? Attr.getArgAsExpr(1) : nullptr;
+ S.AddAssumeAlignedAttr(Attr.getRange(), D, E, OE,
+ Attr.getAttributeSpellingListIndex());
+}
+
+void Sema::AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
+ Expr *OE, unsigned SpellingListIndex) {
+ QualType ResultType = getFunctionOrMethodResultType(D);
+ SourceRange SR = getFunctionOrMethodResultSourceRange(D);
+
+ AssumeAlignedAttr TmpAttr(AttrRange, Context, E, OE, SpellingListIndex);
+ SourceLocation AttrLoc = AttrRange.getBegin();
+
+ if (!isValidPointerAttrType(ResultType, /* RefOkay */ true)) {
+ Diag(AttrLoc, diag::warn_attribute_return_pointers_refs_only)
+ << &TmpAttr << AttrRange << SR;
+ return;
+ }
+
+ if (!E->isValueDependent()) {
+ llvm::APSInt I(64);
+ if (!E->isIntegerConstantExpr(I, Context)) {
+ if (OE)
+ Diag(AttrLoc, diag::err_attribute_argument_n_type)
+ << &TmpAttr << 1 << AANT_ArgumentIntegerConstant
+ << E->getSourceRange();
+ else
+ Diag(AttrLoc, diag::err_attribute_argument_type)
+ << &TmpAttr << AANT_ArgumentIntegerConstant
+ << E->getSourceRange();
+ return;
+ }
+
+ if (!I.isPowerOf2()) {
+ Diag(AttrLoc, diag::err_alignment_not_power_of_two)
+ << E->getSourceRange();
+ return;
+ }
+ }
+
+ if (OE) {
+ if (!OE->isValueDependent()) {
+ llvm::APSInt I(64);
+ if (!OE->isIntegerConstantExpr(I, Context)) {
+ Diag(AttrLoc, diag::err_attribute_argument_n_type)
+ << &TmpAttr << 2 << AANT_ArgumentIntegerConstant
+ << OE->getSourceRange();
+ return;
+ }
+ }
+ }
+
+ D->addAttr(::new (Context)
+ AssumeAlignedAttr(AttrRange, Context, E, OE, SpellingListIndex));
+}
+
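Hypothetical user code for the assume_aligned validation added above: the alignment must be an integer constant power of two, the optional second argument is an offset, and the attribute only makes sense on functions returning a pointer or reference.

    void *pool_alloc(unsigned n) __attribute__((assume_aligned(64)));      // fine
    void *pool_alloc2(unsigned n) __attribute__((assume_aligned(64, 16))); // fine: aligned to 64 with offset 16
    void *bad_alloc(unsigned n) __attribute__((assume_aligned(48)));       // error: 48 is not a power of two
    int  bad_target(unsigned n) __attribute__((assume_aligned(64)));       // warns: return type is not a pointer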
static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
// This attribute must be applied to a function declaration. The first
// argument to the attribute must be an identifier, the name of the resource,
@@ -1286,13 +1384,26 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
// Check we don't have a conflict with another ownership attribute.
for (const auto *I : D->specific_attrs<OwnershipAttr>()) {
- // FIXME: A returns attribute should conflict with any returns attribute
- // with a different index too.
+ // Cannot have two ownership attributes of different kinds for the same
+ // index.
if (I->getOwnKind() != K && I->args_end() !=
std::find(I->args_begin(), I->args_end(), Idx)) {
S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
<< AL.getName() << I;
return;
+ } else if (K == OwnershipAttr::Returns &&
+ I->getOwnKind() == OwnershipAttr::Returns) {
+ // A returns attribute conflicts with any other returns attribute using
+ // a different index. Note, diagnostic reporting is 1-based, but stored
+ // argument indexes are 0-based.
+ if (std::find(I->args_begin(), I->args_end(), Idx) == I->args_end()) {
+ S.Diag(I->getLocation(), diag::err_ownership_returns_index_mismatch)
+ << *(I->args_begin()) + 1;
+ if (I->args_size())
+ S.Diag(AL.getLoc(), diag::note_ownership_returns_index_mismatch)
+ << (unsigned)Idx + 1 << Ex->getSourceRange();
+ return;
+ }
}
}
OwnershipArgs.push_back(Idx);
@@ -1586,15 +1697,8 @@ static void handleUsedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
static void handleConstructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- // check the attribute arguments.
- if (Attr.getNumArgs() > 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
- << Attr.getName() << 1;
- return;
- }
-
uint32_t priority = ConstructorAttr::DefaultPriority;
- if (Attr.getNumArgs() > 0 &&
+ if (Attr.getNumArgs() &&
!checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), priority))
return;
@@ -1604,15 +1708,8 @@ static void handleConstructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
static void handleDestructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- // check the attribute arguments.
- if (Attr.getNumArgs() > 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
- << Attr.getName() << 1;
- return;
- }
-
uint32_t priority = DestructorAttr::DefaultPriority;
- if (Attr.getNumArgs() > 0 &&
+ if (Attr.getNumArgs() &&
!checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), priority))
return;
@@ -1624,16 +1721,9 @@ static void handleDestructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
template <typename AttrTy>
static void handleAttrWithMessage(Sema &S, Decl *D,
const AttributeList &Attr) {
- unsigned NumArgs = Attr.getNumArgs();
- if (NumArgs > 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
- << Attr.getName() << 1;
- return;
- }
-
// Handle the case where the attribute has a text message.
StringRef Str;
- if (NumArgs == 1 && !S.checkStringLiteralArgumentAttr(Attr, 0, Str))
+ if (Attr.getNumArgs() == 1 && !S.checkStringLiteralArgumentAttr(Attr, 0, Str))
return;
D->addAttr(::new (S.Context) AttrTy(Attr.getRange(), S.Context, Str,
@@ -2036,13 +2126,6 @@ static void handleBlocksAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
static void handleSentinelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- // check the attribute arguments.
- if (Attr.getNumArgs() > 2) {
- S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
- << Attr.getName() << 2;
- return;
- }
-
unsigned sentinel = (unsigned)SentinelAttr::DefaultSentinel;
if (Attr.getNumArgs() > 0) {
Expr *E = Attr.getArgAsExpr(0);
@@ -2349,10 +2432,9 @@ static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) {
!isCFStringType(Ty, S.Context) &&
(!Ty->isPointerType() ||
!Ty->getAs<PointerType>()->getPointeeType()->isCharType())) {
- // FIXME: Should highlight the actual expression that has the wrong type.
S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
- << (not_nsstring_type ? "a string type" : "an NSString")
- << IdxExpr->getSourceRange();
+ << (not_nsstring_type ? "a string type" : "an NSString")
+ << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, 0);
return;
}
Ty = getFunctionOrMethodResultType(D);
@@ -2360,10 +2442,9 @@ static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) {
!isCFStringType(Ty, S.Context) &&
(!Ty->isPointerType() ||
!Ty->getAs<PointerType>()->getPointeeType()->isCharType())) {
- // FIXME: Should highlight the actual expression that has the wrong type.
S.Diag(Attr.getLoc(), diag::err_format_attribute_result_not)
- << (not_nsstring_type ? "string type" : "NSString")
- << IdxExpr->getSourceRange();
+ << (not_nsstring_type ? "string type" : "NSString")
+ << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, 0);
return;
}
@@ -2534,23 +2615,24 @@ static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (Kind == CFStringFormat) {
if (!isCFStringType(Ty, S.Context)) {
S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
- << "a CFString" << IdxExpr->getSourceRange();
+ << "a CFString" << IdxExpr->getSourceRange()
+ << getFunctionOrMethodParamRange(D, ArgIdx);
return;
}
} else if (Kind == NSStringFormat) {
// FIXME: do we need to check if the type is NSString*? What are the
// semantics?
if (!isNSStringType(Ty, S.Context)) {
- // FIXME: Should highlight the actual expression that has the wrong type.
S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
- << "an NSString" << IdxExpr->getSourceRange();
+ << "an NSString" << IdxExpr->getSourceRange()
+ << getFunctionOrMethodParamRange(D, ArgIdx);
return;
}
} else if (!Ty->isPointerType() ||
!Ty->getAs<PointerType>()->getPointeeType()->isCharType()) {
- // FIXME: Should highlight the actual expression that has the wrong type.
S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
- << "a string type" << IdxExpr->getSourceRange();
+ << "a string type" << IdxExpr->getSourceRange()
+ << getFunctionOrMethodParamRange(D, ArgIdx);
return;
}
@@ -2679,6 +2761,58 @@ static void handleAnnotateAttr(Sema &S, Decl *D, const AttributeList &Attr) {
Attr.getAttributeSpellingListIndex()));
}
+static void handleAlignValueAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ S.AddAlignValueAttr(Attr.getRange(), D, Attr.getArgAsExpr(0),
+ Attr.getAttributeSpellingListIndex());
+}
+
+void Sema::AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
+ unsigned SpellingListIndex) {
+ AlignValueAttr TmpAttr(AttrRange, Context, E, SpellingListIndex);
+ SourceLocation AttrLoc = AttrRange.getBegin();
+
+ QualType T;
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D))
+ T = TD->getUnderlyingType();
+ else if (ValueDecl *VD = dyn_cast<ValueDecl>(D))
+ T = VD->getType();
+ else
+ llvm_unreachable("Unknown decl type for align_value");
+
+ if (!T->isDependentType() && !T->isAnyPointerType() &&
+ !T->isReferenceType() && !T->isMemberPointerType()) {
+ Diag(AttrLoc, diag::warn_attribute_pointer_or_reference_only)
+ << &TmpAttr /*TmpAttr.getName()*/ << T << D->getSourceRange();
+ return;
+ }
+
+ if (!E->isValueDependent()) {
+ llvm::APSInt Alignment(32);
+ ExprResult ICE
+ = VerifyIntegerConstantExpression(E, &Alignment,
+ diag::err_align_value_attribute_argument_not_int,
+ /*AllowFold*/ false);
+ if (ICE.isInvalid())
+ return;
+
+ if (!Alignment.isPowerOf2()) {
+ Diag(AttrLoc, diag::err_alignment_not_power_of_two)
+ << E->getSourceRange();
+ return;
+ }
+
+ D->addAttr(::new (Context)
+ AlignValueAttr(AttrRange, Context, ICE.get(),
+ SpellingListIndex));
+ return;
+ }
+
+ // Save dependent expressions in the AST to be instantiated.
+ D->addAttr(::new (Context) AlignValueAttr(TmpAttr));
+ return;
+}
+
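Hypothetical user code for the new align_value handling: the attribute attaches to pointer or reference declarations (including typedefs) and, like assume_aligned, requires a constant power-of-two argument.

    typedef double *aligned_double_ptr __attribute__((align_value(64))); // fine: typedef of a pointer
    void fft(double *data __attribute__((align_value(32))));             // fine: pointer parameter
    // double scale __attribute__((align_value(16)));  // would warn: not a pointer or reference
    // double *p __attribute__((align_value(48)));     // would error: 48 is not a power of two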
static void handleAlignedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
if (Attr.getNumArgs() > 1) {
@@ -2773,7 +2907,7 @@ void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
// An alignment specification of zero has no effect.
if (!(TmpAttr.isAlignas() && !Alignment) &&
!llvm::isPowerOf2_64(Alignment.getZExtValue())) {
- Diag(AttrLoc, diag::err_attribute_aligned_not_power_of_two)
+ Diag(AttrLoc, diag::err_alignment_not_power_of_two)
<< E->getSourceRange();
return;
}
@@ -3006,24 +3140,75 @@ static void handleNoDebugAttr(Sema &S, Decl *D, const AttributeList &Attr) {
Attr.getAttributeSpellingListIndex()));
}
+AlwaysInlineAttr *Sema::mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
+ IdentifierInfo *Ident,
+ unsigned AttrSpellingListIndex) {
+ if (OptimizeNoneAttr *Optnone = D->getAttr<OptimizeNoneAttr>()) {
+ Diag(Range.getBegin(), diag::warn_attribute_ignored) << Ident;
+ Diag(Optnone->getLocation(), diag::note_conflicting_attribute);
+ return nullptr;
+ }
+
+ if (D->hasAttr<AlwaysInlineAttr>())
+ return nullptr;
+
+ return ::new (Context) AlwaysInlineAttr(Range, Context,
+ AttrSpellingListIndex);
+}
+
+MinSizeAttr *Sema::mergeMinSizeAttr(Decl *D, SourceRange Range,
+ unsigned AttrSpellingListIndex) {
+ if (OptimizeNoneAttr *Optnone = D->getAttr<OptimizeNoneAttr>()) {
+ Diag(Range.getBegin(), diag::warn_attribute_ignored) << "'minsize'";
+ Diag(Optnone->getLocation(), diag::note_conflicting_attribute);
+ return nullptr;
+ }
+
+ if (D->hasAttr<MinSizeAttr>())
+ return nullptr;
+
+ return ::new (Context) MinSizeAttr(Range, Context, AttrSpellingListIndex);
+}
+
+OptimizeNoneAttr *Sema::mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
+ unsigned AttrSpellingListIndex) {
+ if (AlwaysInlineAttr *Inline = D->getAttr<AlwaysInlineAttr>()) {
+ Diag(Inline->getLocation(), diag::warn_attribute_ignored) << Inline;
+ Diag(Range.getBegin(), diag::note_conflicting_attribute);
+ D->dropAttr<AlwaysInlineAttr>();
+ }
+ if (MinSizeAttr *MinSize = D->getAttr<MinSizeAttr>()) {
+ Diag(MinSize->getLocation(), diag::warn_attribute_ignored) << MinSize;
+ Diag(Range.getBegin(), diag::note_conflicting_attribute);
+ D->dropAttr<MinSizeAttr>();
+ }
+
+ if (D->hasAttr<OptimizeNoneAttr>())
+ return nullptr;
+
+ return ::new (Context) OptimizeNoneAttr(Range, Context,
+ AttrSpellingListIndex);
+}
+
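With the merge functions above, optnone now wins over the optimization-forcing attributes: the conflicting attribute is dropped or ignored with a warning instead of the combination being rejected outright. A hypothetical translation unit:

    __attribute__((always_inline, optnone)) void f() {} // warns: 'always_inline' ignored, optnone wins
    __attribute__((minsize, optnone))       void g() {} // warns: 'minsize' ignored, optnone wins
    __attribute__((optnone))                void h() {} // fine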
static void handleAlwaysInlineAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
- if (checkAttrMutualExclusion<OptimizeNoneAttr>(S, D, Attr))
- return;
+ if (AlwaysInlineAttr *Inline = S.mergeAlwaysInlineAttr(
+ D, Attr.getRange(), Attr.getName(),
+ Attr.getAttributeSpellingListIndex()))
+ D->addAttr(Inline);
+}
- D->addAttr(::new (S.Context)
- AlwaysInlineAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+static void handleMinSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (MinSizeAttr *MinSize = S.mergeMinSizeAttr(
+ D, Attr.getRange(), Attr.getAttributeSpellingListIndex()))
+ D->addAttr(MinSize);
}
static void handleOptimizeNoneAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
- if (checkAttrMutualExclusion<AlwaysInlineAttr>(S, D, Attr))
- return;
-
- D->addAttr(::new (S.Context)
- OptimizeNoneAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ if (OptimizeNoneAttr *Optnone = S.mergeOptimizeNoneAttr(
+ D, Attr.getRange(), Attr.getAttributeSpellingListIndex()))
+ D->addAttr(Optnone);
}
static void handleGlobalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -3039,7 +3224,7 @@ static void handleGlobalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
D->addAttr(::new (S.Context)
CUDAGlobalAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ Attr.getAttributeSpellingListIndex()));
}
static void handleGNUInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -3096,6 +3281,11 @@ static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) {
PascalAttr(Attr.getRange(), S.Context,
Attr.getAttributeSpellingListIndex()));
return;
+ case AttributeList::AT_VectorCall:
+ D->addAttr(::new (S.Context)
+ VectorCallAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ return;
case AttributeList::AT_MSABI:
D->addAttr(::new (S.Context)
MSABIAttr(Attr.getRange(), S.Context,
@@ -3158,6 +3348,7 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
case AttributeList::AT_StdCall: CC = CC_X86StdCall; break;
case AttributeList::AT_ThisCall: CC = CC_X86ThisCall; break;
case AttributeList::AT_Pascal: CC = CC_X86Pascal; break;
+ case AttributeList::AT_VectorCall: CC = CC_X86VectorCall; break;
case AttributeList::AT_MSABI:
CC = Context.getTargetInfo().getTriple().isOSWindows() ? CC_C :
CC_X86_64Win64;
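Hypothetical user code for the newly wired-up vectorcall convention (whether each spelling is accepted depends on the target and on MS-extension mode; shown for illustration only):

    float __vectorcall horizontal_add(float a, float b);                 // MS keyword spelling
    float __attribute__((vectorcall)) horizontal_add2(float a, float b); // GNU attribute spelling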
@@ -3242,14 +3433,6 @@ bool Sema::CheckRegparmAttr(const AttributeList &Attr, unsigned &numParams) {
static void handleLaunchBoundsAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
- // check the attribute arguments.
- if (Attr.getNumArgs() != 1 && Attr.getNumArgs() != 2) {
- // FIXME: 0 is not okay.
- S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
- << Attr.getName() << 2;
- return;
- }
-
uint32_t MaxThreads, MinBlocks = 0;
if (!checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), MaxThreads, 1))
return;
@@ -3440,29 +3623,24 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
default:
llvm_unreachable("invalid ownership attribute");
case AttributeList::AT_NSReturnsAutoreleased:
- D->addAttr(::new (S.Context)
- NSReturnsAutoreleasedAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) NSReturnsAutoreleasedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
return;
case AttributeList::AT_CFReturnsNotRetained:
- D->addAttr(::new (S.Context)
- CFReturnsNotRetainedAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) CFReturnsNotRetainedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
return;
case AttributeList::AT_NSReturnsNotRetained:
- D->addAttr(::new (S.Context)
- NSReturnsNotRetainedAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) NSReturnsNotRetainedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
return;
case AttributeList::AT_CFReturnsRetained:
- D->addAttr(::new (S.Context)
- CFReturnsRetainedAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) CFReturnsRetainedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
return;
case AttributeList::AT_NSReturnsRetained:
- D->addAttr(::new (S.Context)
- NSReturnsRetainedAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) NSReturnsRetainedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
return;
};
}
@@ -3491,9 +3669,8 @@ static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D,
return;
}
- D->addAttr(::new (S.Context)
- ObjCReturnsInnerPointerAttr(attr.getRange(), S.Context,
- attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) ObjCReturnsInnerPointerAttr(
+ attr.getRange(), S.Context, attr.getAttributeSpellingListIndex()));
}
static void handleObjCRequiresSuperAttr(Sema &S, Decl *D,
@@ -3587,7 +3764,8 @@ static void handleObjCBridgeRelatedAttr(Sema &S, Scope *Sc, Decl *D,
static void handleObjCDesignatedInitializer(Sema &S, Decl *D,
const AttributeList &Attr) {
ObjCInterfaceDecl *IFace;
- if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
+ if (ObjCCategoryDecl *CatDecl =
+ dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
IFace = CatDecl->getClassInterface();
else
IFace = cast<ObjCInterfaceDecl>(D->getDeclContext());
@@ -3812,6 +3990,32 @@ static void handleInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) {
handleARMInterruptAttr(S, D, Attr);
}
+static void handleAMDGPUNumVGPRAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ uint32_t NumRegs;
+ Expr *NumRegsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
+ if (!checkUInt32Argument(S, Attr, NumRegsExpr, NumRegs))
+ return;
+
+ D->addAttr(::new (S.Context)
+ AMDGPUNumVGPRAttr(Attr.getLoc(), S.Context,
+ NumRegs,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleAMDGPUNumSGPRAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ uint32_t NumRegs;
+ Expr *NumRegsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
+ if (!checkUInt32Argument(S, Attr, NumRegsExpr, NumRegs))
+ return;
+
+ D->addAttr(::new (S.Context)
+ AMDGPUNumSGPRAttr(Attr.getLoc(), S.Context,
+ NumRegs,
+ Attr.getAttributeSpellingListIndex()));
+}
+
static void handleX86ForceAlignArgPointerAttr(Sema &S, Decl *D,
const AttributeList& Attr) {
// If we try to apply it to a function pointer, don't warn, but don't
@@ -3871,6 +4075,16 @@ static void handleDLLAttr(Sema &S, Decl *D, const AttributeList &A) {
return;
}
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isInlined() && A.getKind() == AttributeList::AT_DLLImport &&
+ !S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ // MinGW doesn't allow dllimport on inline functions.
+ S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored_on_inline)
+ << A.getName();
+ return;
+ }
+ }
+
unsigned Index = A.getAttributeSpellingListIndex();
Attr *NewAttr = A.getKind() == AttributeList::AT_DLLExport
? (Attr *)S.mergeDLLExportAttr(D, A.getRange(), Index)
@@ -4001,6 +4215,19 @@ static void handleRequiresCapabilityAttr(Sema &S, Decl *D,
D->addAttr(RCA);
}
+static void handleDeprecatedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (auto *NSD = dyn_cast<NamespaceDecl>(D)) {
+ if (NSD->isAnonymousNamespace()) {
+ S.Diag(Attr.getLoc(), diag::warn_deprecated_anonymous_namespace);
+ // Do not want to attach the attribute to the namespace because that will
+ // cause confusing diagnostic reports for uses of declarations within the
+ // namespace.
+ return;
+ }
+ }
+ handleAttrWithMessage<DeprecatedAttr>(S, D, Attr);
+}
+
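A hypothetical C++ snippet for the new anonymous-namespace special case (assuming the C++14 attribute spelling; diagnostic wording paraphrased): deprecating an unnamed namespace is diagnosed and the attribute is not attached, while a named namespace still takes the attribute as before.

    namespace [[deprecated]] {                   // warns: deprecating an anonymous namespace; attribute dropped
      void helper();
    }
    namespace [[deprecated("use v2")]] old_api { // still attaches normally
      void helper();
    }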
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the correct
/// number of arguments were passed, etc.
@@ -4020,11 +4247,20 @@ static bool handleCommonAttributeFeatures(Sema &S, Scope *scope, Decl *D,
if (!Attr.diagnoseLangOpts(S))
return true;
- // If there are no optional arguments, then checking for the argument count
- // is trivial.
- if (Attr.getMinArgs() == Attr.getMaxArgs() &&
- !checkAttributeNumArgs(S, Attr, Attr.getMinArgs()))
- return true;
+ if (Attr.getMinArgs() == Attr.getMaxArgs()) {
+ // If there are no optional arguments, then checking for the argument count
+ // is trivial.
+ if (!checkAttributeNumArgs(S, Attr, Attr.getMinArgs()))
+ return true;
+ } else {
+ // There are optional arguments, so checking is slightly more involved.
+ if (Attr.getMinArgs() &&
+ !checkAttributeAtLeastNumArgs(S, Attr, Attr.getMinArgs()))
+ return true;
+ else if (!Attr.hasVariadicArg() && Attr.getMaxArgs() &&
+ !checkAttributeAtMostNumArgs(S, Attr, Attr.getMaxArgs()))
+ return true;
+ }
// Check whether the attribute appertains to the given subject.
if (!Attr.diagnoseAppertainsTo(S, D))
@@ -4087,6 +4323,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_NoMips16:
handleSimpleAttribute<NoMips16Attr>(S, D, Attr);
break;
+ case AttributeList::AT_AMDGPUNumVGPR:
+ handleAMDGPUNumVGPRAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_AMDGPUNumSGPR:
+ handleAMDGPUNumSGPRAttr(S, D, Attr);
+ break;
case AttributeList::AT_IBAction:
handleSimpleAttribute<IBActionAttr>(S, D, Attr);
break;
@@ -4102,6 +4344,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_Aligned:
handleAlignedAttr(S, D, Attr);
break;
+ case AttributeList::AT_AlignValue:
+ handleAlignValueAttr(S, D, Attr);
+ break;
case AttributeList::AT_AlwaysInline:
handleAlwaysInlineAttr(S, D, Attr);
break;
@@ -4133,7 +4378,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleSimpleAttribute<CXX11NoReturnAttr>(S, D, Attr);
break;
case AttributeList::AT_Deprecated:
- handleAttrWithMessage<DeprecatedAttr>(S, D, Attr);
+ handleDeprecatedAttr(S, D, Attr);
break;
case AttributeList::AT_Destructor:
handleDestructorAttr(S, D, Attr);
@@ -4145,11 +4390,14 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleExtVectorTypeAttr(S, scope, D, Attr);
break;
case AttributeList::AT_MinSize:
- handleSimpleAttribute<MinSizeAttr>(S, D, Attr);
+ handleMinSizeAttr(S, D, Attr);
break;
case AttributeList::AT_OptimizeNone:
handleOptimizeNoneAttr(S, D, Attr);
break;
+ case AttributeList::AT_FlagEnum:
+ handleSimpleAttribute<FlagEnumAttr>(S, D, Attr);
+ break;
case AttributeList::AT_Flatten:
handleSimpleAttribute<FlattenAttr>(S, D, Attr);
break;
@@ -4198,6 +4446,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_ReturnsNonNull:
handleReturnsNonNullAttr(S, D, Attr);
break;
+ case AttributeList::AT_AssumeAligned:
+ handleAssumeAlignedAttr(S, D, Attr);
+ break;
case AttributeList::AT_Overloadable:
handleSimpleAttribute<OverloadableAttr>(S, D, Attr);
break;
@@ -4392,6 +4643,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_FastCall:
case AttributeList::AT_ThisCall:
case AttributeList::AT_Pascal:
+ case AttributeList::AT_VectorCall:
case AttributeList::AT_MSABI:
case AttributeList::AT_SysVABI:
case AttributeList::AT_Pcs:
@@ -4553,19 +4805,31 @@ void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
return;
}
+ // FIXME: We should be able to handle this in TableGen as well. It would be
+ // good to have a way to specify "these attributes must appear as a group",
+ // for these. Additionally, it would be good to have a way to specify "these
+ // attributes must never appear as a group" for attributes like cold and hot.
if (!D->hasAttr<OpenCLKernelAttr>()) {
// These attributes cannot be applied to a non-kernel function.
if (Attr *A = D->getAttr<ReqdWorkGroupSizeAttr>()) {
+ // FIXME: This emits a different error message than
+ // diag::err_attribute_wrong_decl_type + ExpectedKernelFunction.
Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
D->setInvalidDecl();
- }
- if (Attr *A = D->getAttr<WorkGroupSizeHintAttr>()) {
+ } else if (Attr *A = D->getAttr<WorkGroupSizeHintAttr>()) {
Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
D->setInvalidDecl();
- }
- if (Attr *A = D->getAttr<VecTypeHintAttr>()) {
+ } else if (Attr *A = D->getAttr<VecTypeHintAttr>()) {
Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
D->setInvalidDecl();
+ } else if (Attr *A = D->getAttr<AMDGPUNumVGPRAttr>()) {
+ Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << A << ExpectedKernelFunction;
+ D->setInvalidDecl();
+ } else if (Attr *A = D->getAttr<AMDGPUNumSGPRAttr>()) {
+ Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << A << ExpectedKernelFunction;
+ D->setInvalidDecl();
}
}
}
@@ -4576,7 +4840,7 @@ bool Sema::ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList) {
for (const AttributeList* l = AttrList; l; l = l->getNext()) {
if (l->getKind() == AttributeList::AT_Annotate) {
- handleAnnotateAttr(*this, ASDecl, *l);
+ ProcessDeclAttribute(*this, nullptr, ASDecl, *l, l->isCXX11Attribute());
} else {
Diag(l->getLoc(), diag::err_only_annotate_after_access_spec);
return true;
@@ -4774,61 +5038,6 @@ static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag,
diag.Triggered = true;
}
-void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
- assert(DelayedDiagnostics.getCurrentPool());
- DelayedDiagnosticPool &poppedPool = *DelayedDiagnostics.getCurrentPool();
- DelayedDiagnostics.popWithoutEmitting(state);
-
- // When delaying diagnostics to run in the context of a parsed
- // declaration, we only want to actually emit anything if parsing
- // succeeds.
- if (!decl) return;
-
- // We emit all the active diagnostics in this pool or any of its
- // parents. In general, we'll get one pool for the decl spec
- // and a child pool for each declarator; in a decl group like:
- // deprecated_typedef foo, *bar, baz();
- // only the declarator pops will be passed decls. This is correct;
- // we really do need to consider delayed diagnostics from the decl spec
- // for each of the different declarations.
- const DelayedDiagnosticPool *pool = &poppedPool;
- do {
- for (DelayedDiagnosticPool::pool_iterator
- i = pool->pool_begin(), e = pool->pool_end(); i != e; ++i) {
- // This const_cast is a bit lame. Really, Triggered should be mutable.
- DelayedDiagnostic &diag = const_cast<DelayedDiagnostic&>(*i);
- if (diag.Triggered)
- continue;
-
- switch (diag.Kind) {
- case DelayedDiagnostic::Deprecation:
- case DelayedDiagnostic::Unavailable:
- // Don't bother giving deprecation/unavailable diagnostics if
- // the decl is invalid.
- if (!decl->isInvalidDecl())
- HandleDelayedAvailabilityCheck(diag, decl);
- break;
-
- case DelayedDiagnostic::Access:
- HandleDelayedAccessCheck(diag, decl);
- break;
-
- case DelayedDiagnostic::ForbiddenType:
- handleDelayedForbiddenType(*this, diag, decl);
- break;
- }
- }
- } while ((pool = pool->getParent()));
-}
-
-/// Given a set of delayed diagnostics, re-emit them as if they had
-/// been delayed in the current context instead of in the given pool.
-/// Essentially, this just moves them to the current pool.
-void Sema::redelayDiagnostics(DelayedDiagnosticPool &pool) {
- DelayedDiagnosticPool *curPool = DelayedDiagnostics.getCurrentPool();
- assert(curPool && "re-emitting in undelayed context not supported");
- curPool->steal(pool);
-}
static bool isDeclDeprecated(Decl *D) {
do {
@@ -4852,17 +5061,12 @@ static bool isDeclUnavailable(Decl *D) {
return false;
}
-static void
-DoEmitAvailabilityWarning(Sema &S,
- DelayedDiagnostic::DDKind K,
- Decl *Ctx,
- const NamedDecl *D,
- StringRef Message,
- SourceLocation Loc,
- const ObjCInterfaceDecl *UnknownObjCClass,
- const ObjCPropertyDecl *ObjCProperty,
- bool ObjCPropertyAccess) {
-
+static void DoEmitAvailabilityWarning(Sema &S, DelayedDiagnostic::DDKind K,
+ Decl *Ctx, const NamedDecl *D,
+ StringRef Message, SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty,
+ bool ObjCPropertyAccess) {
// Diagnostics for deprecated or unavailable.
unsigned diag, diag_message, diag_fwdclass_message;
@@ -4874,65 +5078,116 @@ DoEmitAvailabilityWarning(Sema &S,
// Don't warn if our current context is deprecated or unavailable.
switch (K) {
- case DelayedDiagnostic::Deprecation:
- if (isDeclDeprecated(Ctx))
- return;
- diag = !ObjCPropertyAccess ? diag::warn_deprecated
- : diag::warn_property_method_deprecated;
- diag_message = diag::warn_deprecated_message;
- diag_fwdclass_message = diag::warn_deprecated_fwdclass_message;
- property_note_select = /* deprecated */ 0;
- available_here_select_kind = /* deprecated */ 2;
- break;
+ case DelayedDiagnostic::Deprecation:
+ if (isDeclDeprecated(Ctx))
+ return;
+ diag = !ObjCPropertyAccess ? diag::warn_deprecated
+ : diag::warn_property_method_deprecated;
+ diag_message = diag::warn_deprecated_message;
+ diag_fwdclass_message = diag::warn_deprecated_fwdclass_message;
+ property_note_select = /* deprecated */ 0;
+ available_here_select_kind = /* deprecated */ 2;
+ break;
- case DelayedDiagnostic::Unavailable:
- if (isDeclUnavailable(Ctx))
- return;
- diag = !ObjCPropertyAccess ? diag::err_unavailable
- : diag::err_property_method_unavailable;
- diag_message = diag::err_unavailable_message;
- diag_fwdclass_message = diag::warn_unavailable_fwdclass_message;
- property_note_select = /* unavailable */ 1;
- available_here_select_kind = /* unavailable */ 0;
- break;
+ case DelayedDiagnostic::Unavailable:
+ if (isDeclUnavailable(Ctx))
+ return;
+ diag = !ObjCPropertyAccess ? diag::err_unavailable
+ : diag::err_property_method_unavailable;
+ diag_message = diag::err_unavailable_message;
+ diag_fwdclass_message = diag::warn_unavailable_fwdclass_message;
+ property_note_select = /* unavailable */ 1;
+ available_here_select_kind = /* unavailable */ 0;
+ break;
- default:
- llvm_unreachable("Neither a deprecation or unavailable kind");
+ default:
+ llvm_unreachable("Neither a deprecation or unavailable kind");
}
- DeclarationName Name = D->getDeclName();
if (!Message.empty()) {
- S.Diag(Loc, diag_message) << Name << Message;
+ S.Diag(Loc, diag_message) << D << Message;
if (ObjCProperty)
S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
- << ObjCProperty->getDeclName() << property_note_select;
+ << ObjCProperty->getDeclName() << property_note_select;
} else if (!UnknownObjCClass) {
- S.Diag(Loc, diag) << Name;
+ S.Diag(Loc, diag) << D;
if (ObjCProperty)
S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
- << ObjCProperty->getDeclName() << property_note_select;
+ << ObjCProperty->getDeclName() << property_note_select;
} else {
- S.Diag(Loc, diag_fwdclass_message) << Name;
+ S.Diag(Loc, diag_fwdclass_message) << D;
S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
}
S.Diag(D->getLocation(), diag::note_availability_specified_here)
- << D << available_here_select_kind;
+ << D << available_here_select_kind;
}
-void Sema::HandleDelayedAvailabilityCheck(DelayedDiagnostic &DD,
- Decl *Ctx) {
+static void handleDelayedAvailabilityCheck(Sema &S, DelayedDiagnostic &DD,
+ Decl *Ctx) {
DD.Triggered = true;
- DoEmitAvailabilityWarning(*this,
- (DelayedDiagnostic::DDKind) DD.Kind,
- Ctx,
- DD.getDeprecationDecl(),
- DD.getDeprecationMessage(),
- DD.Loc,
- DD.getUnknownObjCClass(),
+ DoEmitAvailabilityWarning(S, (DelayedDiagnostic::DDKind)DD.Kind, Ctx,
+ DD.getDeprecationDecl(), DD.getDeprecationMessage(),
+ DD.Loc, DD.getUnknownObjCClass(),
DD.getObjCProperty(), false);
}
+void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
+ assert(DelayedDiagnostics.getCurrentPool());
+ DelayedDiagnosticPool &poppedPool = *DelayedDiagnostics.getCurrentPool();
+ DelayedDiagnostics.popWithoutEmitting(state);
+
+ // When delaying diagnostics to run in the context of a parsed
+ // declaration, we only want to actually emit anything if parsing
+ // succeeds.
+ if (!decl) return;
+
+ // We emit all the active diagnostics in this pool or any of its
+ // parents. In general, we'll get one pool for the decl spec
+ // and a child pool for each declarator; in a decl group like:
+ // deprecated_typedef foo, *bar, baz();
+ // only the declarator pops will be passed decls. This is correct;
+ // we really do need to consider delayed diagnostics from the decl spec
+ // for each of the different declarations.
+ const DelayedDiagnosticPool *pool = &poppedPool;
+ do {
+ for (DelayedDiagnosticPool::pool_iterator
+ i = pool->pool_begin(), e = pool->pool_end(); i != e; ++i) {
+ // This const_cast is a bit lame. Really, Triggered should be mutable.
+ DelayedDiagnostic &diag = const_cast<DelayedDiagnostic&>(*i);
+ if (diag.Triggered)
+ continue;
+
+ switch (diag.Kind) {
+ case DelayedDiagnostic::Deprecation:
+ case DelayedDiagnostic::Unavailable:
+ // Don't bother giving deprecation/unavailable diagnostics if
+ // the decl is invalid.
+ if (!decl->isInvalidDecl())
+ handleDelayedAvailabilityCheck(*this, diag, decl);
+ break;
+
+ case DelayedDiagnostic::Access:
+ HandleDelayedAccessCheck(diag, decl);
+ break;
+
+ case DelayedDiagnostic::ForbiddenType:
+ handleDelayedForbiddenType(*this, diag, decl);
+ break;
+ }
+ }
+ } while ((pool = pool->getParent()));
+}
+
+/// Given a set of delayed diagnostics, re-emit them as if they had
+/// been delayed in the current context instead of in the given pool.
+/// Essentially, this just moves them to the current pool.
+void Sema::redelayDiagnostics(DelayedDiagnosticPool &pool) {
+ DelayedDiagnosticPool *curPool = DelayedDiagnostics.getCurrentPool();
+ assert(curPool && "re-emitting in undelayed context not supported");
+ curPool->steal(pool);
+}
+
void Sema::EmitAvailabilityWarning(AvailabilityDiagnostic AD,
NamedDecl *D, StringRef Message,
SourceLocation Loc,
@@ -4941,11 +5196,9 @@ void Sema::EmitAvailabilityWarning(AvailabilityDiagnostic AD,
bool ObjCPropertyAccess) {
// Delay if we're currently parsing a declaration.
if (DelayedDiagnostics.shouldDelayDiagnostics()) {
- DelayedDiagnostics.add(DelayedDiagnostic::makeAvailability(AD, Loc, D,
- UnknownObjCClass,
- ObjCProperty,
- Message,
- ObjCPropertyAccess));
+ DelayedDiagnostics.add(DelayedDiagnostic::makeAvailability(
+ AD, Loc, D, UnknownObjCClass, ObjCProperty, Message,
+ ObjCPropertyAccess));
return;
}
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index c5cd83da59d6..510738ea8168 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -36,6 +36,7 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Template.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include <map>
@@ -212,7 +213,7 @@ Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
ComputedEST = EST_Dynamic;
// Record the exceptions in this function's exception specification.
for (const auto &E : Proto->exceptions())
- if (ExceptionsSeen.insert(Self->Context.getCanonicalType(E)))
+ if (ExceptionsSeen.insert(Self->Context.getCanonicalType(E)).second)
Exceptions.push_back(E);
}
@@ -353,7 +354,9 @@ void Sema::ActOnParamDefaultArgumentError(Decl *param,
Param->setInvalidDecl();
UnparsedDefaultArgLocs.erase(Param);
Param->setDefaultArg(new(Context)
- OpaqueValueExpr(EqualLoc, Param->getType(), VK_RValue));
+ OpaqueValueExpr(EqualLoc,
+ Param->getType().getNonReferenceType(),
+ VK_RValue));
}
/// CheckExtraCXXDefaultArguments - Check for any extra default
@@ -385,9 +388,14 @@ void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
ParmVarDecl *Param = cast<ParmVarDecl>(chunk.Fun.Params[argIdx].Param);
if (Param->hasUnparsedDefaultArg()) {
CachedTokens *Toks = chunk.Fun.Params[argIdx].DefaultArgTokens;
+ SourceRange SR;
+ if (Toks->size() > 1)
+ SR = SourceRange((*Toks)[1].getLocation(),
+ Toks->back().getLocation());
+ else
+ SR = UnparsedDefaultArgLocs[Param];
Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc)
- << SourceRange((*Toks)[1].getLocation(),
- Toks->back().getLocation());
+ << SR;
delete Toks;
chunk.Fun.Params[argIdx].DefaultArgTokens = nullptr;
} else if (Param->getDefaultArg()) {
@@ -446,20 +454,24 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
bool OldParamHasDfl = OldParam->hasDefaultArg();
bool NewParamHasDfl = NewParam->hasDefaultArg();
- NamedDecl *ND = Old;
-
// The declaration context corresponding to the scope is the semantic
// parent, unless this is a local function declaration, in which case
// it is that surrounding function.
- DeclContext *ScopeDC = New->getLexicalDeclContext();
- if (!ScopeDC->isFunctionOrMethod())
- ScopeDC = New->getDeclContext();
- if (S && !isDeclInScope(ND, ScopeDC, S) &&
+ DeclContext *ScopeDC = New->isLocalExternDecl()
+ ? New->getLexicalDeclContext()
+ : New->getDeclContext();
+ if (S && !isDeclInScope(Old, ScopeDC, S) &&
!New->getDeclContext()->isRecord())
// Ignore default parameters of old decl if they are not in
// the same scope and this is not an out-of-line definition of
// a member function.
OldParamHasDfl = false;
+ if (New->isLocalExternDecl() != Old->isLocalExternDecl())
+ // If only one of these is a local function declaration, then they are
+ // declared in different scopes, even though isDeclInScope may think
+ // they're in the same scope. (If both are local, the scope check is
+ // sufficient, and if neither is local, then they are in the same scope.)
+ OldParamHasDfl = false;
if (OldParamHasDfl && NewParamHasDfl) {
@@ -855,7 +867,7 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
// C++1y allows types to be defined, not just declared.
if (cast<TagDecl>(DclIt)->isThisDeclarationADefinition())
SemaRef.Diag(DS->getLocStart(),
- SemaRef.getLangOpts().CPlusPlus1y
+ SemaRef.getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_constexpr_type_definition
: diag::ext_constexpr_type_definition)
<< isa<CXXConstructorDecl>(Dcl);
@@ -896,7 +908,7 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
}
}
SemaRef.Diag(VD->getLocation(),
- SemaRef.getLangOpts().CPlusPlus1y
+ SemaRef.getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_constexpr_local_var
: diag::ext_constexpr_local_var)
<< isa<CXXConstructorDecl>(Dcl);
@@ -1041,7 +1053,7 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
case Stmt::ContinueStmtClass:
// C++1y allows all of these. We don't allow them as extensions in C++11,
// because they don't make sense without variable mutation.
- if (!SemaRef.getLangOpts().CPlusPlus1y)
+ if (!SemaRef.getLangOpts().CPlusPlus14)
break;
if (!Cxx1yLoc.isValid())
Cxx1yLoc = S->getLocStart();
@@ -1115,7 +1127,7 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
if (Cxx1yLoc.isValid())
Diag(Cxx1yLoc,
- getLangOpts().CPlusPlus1y
+ getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_constexpr_body_invalid_stmt
: diag::ext_constexpr_body_invalid_stmt)
<< isa<CXXConstructorDecl>(Dcl);
@@ -1180,7 +1192,7 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
// statement. We still do, unless the return type might be void, because
// otherwise if there's no return statement, the function cannot
// be used in a core constant expression.
- bool OK = getLangOpts().CPlusPlus1y &&
+ bool OK = getLangOpts().CPlusPlus14 &&
(Dcl->getReturnType()->isVoidType() ||
Dcl->getReturnType()->isDependentType());
Diag(Dcl->getLocation(),
@@ -1190,7 +1202,7 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
}
if (ReturnStmts.size() > 1) {
Diag(ReturnStmts.back(),
- getLangOpts().CPlusPlus1y
+ getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_constexpr_body_multiple_return
: diag::ext_constexpr_body_multiple_return);
for (unsigned I = 0; I < ReturnStmts.size() - 1; ++I)
@@ -1881,7 +1893,7 @@ void Sema::CheckOverrideControl(NamedDecl *D) {
}
// C++11 [class.virtual]p5:
- // If a virtual function is marked with the virt-specifier override and
+ // If a function is marked with the virt-specifier override and
// does not override a member function of a base class, the program is
// ill-formed.
bool HasOverriddenMethods =
@@ -1891,6 +1903,30 @@ void Sema::CheckOverrideControl(NamedDecl *D) {
<< MD->getDeclName();
}
+void Sema::DiagnoseAbsenceOfOverrideControl(NamedDecl *D) {
+ if (D->isInvalidDecl() || D->hasAttr<OverrideAttr>())
+ return;
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D);
+ if (!MD || MD->isImplicit() || MD->hasAttr<FinalAttr>() ||
+ isa<CXXDestructorDecl>(MD))
+ return;
+
+ SourceLocation Loc = MD->getLocation();
+ SourceLocation SpellingLoc = Loc;
+ if (getSourceManager().isMacroArgExpansion(Loc))
+ SpellingLoc = getSourceManager().getImmediateExpansionRange(Loc).first;
+ SpellingLoc = getSourceManager().getSpellingLoc(SpellingLoc);
+ if (SpellingLoc.isValid() && getSourceManager().isInSystemHeader(SpellingLoc))
+ return;
+
+ if (MD->size_overridden_methods() > 0) {
+ Diag(MD->getLocation(), diag::warn_function_marked_not_override_overriding)
+ << MD->getDeclName();
+ const CXXMethodDecl *OMD = *MD->begin_overridden_methods();
+ Diag(OMD->getLocation(), diag::note_overridden_virtual_function);
+ }
+}
+
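A hypothetical illustration of DiagnoseAbsenceOfOverrideControl: when the surrounding checks invoke it (the call site is outside this hunk), a method that overrides a virtual base method without spelling 'override' can be flagged, with a note pointing at the overridden declaration.

    struct Base {
      virtual ~Base();
      virtual void run();
    };

    struct Derived : Base {
      void run() override; // fine: explicitly marked
    };

    struct Derived2 : Base {
      void run();          // may warn: overrides Base::run() but lacks 'override'
    };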
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
@@ -2133,7 +2169,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
if (BitWidth) {
if (Member->isInvalidDecl()) {
// don't emit another diagnostic.
- } else if (isa<VarDecl>(Member)) {
+ } else if (isa<VarDecl>(Member) || isa<VarTemplateDecl>(Member)) {
// C++ 9.6p3: A bit-field shall not be a static member.
// "static member 'A' cannot be a bit-field"
Diag(Loc, diag::err_static_not_bitfield)
@@ -2205,18 +2241,73 @@ namespace {
Sema &S;
// List of Decls to generate a warning on. Also remove Decls that become
// initialized.
- llvm::SmallPtrSet<ValueDecl*, 4> &Decls;
+ llvm::SmallPtrSetImpl<ValueDecl*> &Decls;
+ // List of base classes of the record. Classes are removed after their
+ // initializers.
+ llvm::SmallPtrSetImpl<QualType> &BaseClasses;
+ // Vector of decls to be removed from the Decl set prior to visiting the
+ // nodes. These Decls may have been initialized in the prior initializer.
+ llvm::SmallVector<ValueDecl*, 4> DeclsToRemove;
// If non-null, add a note to the warning pointing back to the constructor.
const CXXConstructorDecl *Constructor;
+ // Variables to hold state when processing an initializer list. When
+ // InitList is true, special case initialization of FieldDecls matching
+ // InitListFieldDecl.
+ bool InitList;
+ FieldDecl *InitListFieldDecl;
+ llvm::SmallVector<unsigned, 4> InitFieldIndex;
+
public:
typedef EvaluatedExprVisitor<UninitializedFieldVisitor> Inherited;
UninitializedFieldVisitor(Sema &S,
- llvm::SmallPtrSet<ValueDecl*, 4> &Decls,
- const CXXConstructorDecl *Constructor)
- : Inherited(S.Context), S(S), Decls(Decls),
- Constructor(Constructor) { }
+ llvm::SmallPtrSetImpl<ValueDecl*> &Decls,
+ llvm::SmallPtrSetImpl<QualType> &BaseClasses)
+ : Inherited(S.Context), S(S), Decls(Decls), BaseClasses(BaseClasses),
+ Constructor(nullptr), InitList(false), InitListFieldDecl(nullptr) {}
+
+ // Returns true if the use of ME is not an uninitialized use.
+ bool IsInitListMemberExprInitialized(MemberExpr *ME,
+ bool CheckReferenceOnly) {
+ llvm::SmallVector<FieldDecl*, 4> Fields;
+ bool ReferenceField = false;
+ while (ME) {
+ FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
+ if (!FD)
+ return false;
+ Fields.push_back(FD);
+ if (FD->getType()->isReferenceType())
+ ReferenceField = true;
+ ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParenImpCasts());
+ }
+
+ // Binding a reference to an uninitialized field is not an
+ // uninitialized use.
+ if (CheckReferenceOnly && !ReferenceField)
+ return true;
- void HandleMemberExpr(MemberExpr *ME, bool CheckReferenceOnly) {
+ llvm::SmallVector<unsigned, 4> UsedFieldIndex;
+ // Discard the first field since it is the field decl that is being
+ // initialized.
+ for (auto I = Fields.rbegin() + 1, E = Fields.rend(); I != E; ++I) {
+ UsedFieldIndex.push_back((*I)->getFieldIndex());
+ }
+
+ for (auto UsedIter = UsedFieldIndex.begin(),
+ UsedEnd = UsedFieldIndex.end(),
+ OrigIter = InitFieldIndex.begin(),
+ OrigEnd = InitFieldIndex.end();
+ UsedIter != UsedEnd && OrigIter != OrigEnd; ++UsedIter, ++OrigIter) {
+ if (*UsedIter < *OrigIter)
+ return true;
+ if (*UsedIter > *OrigIter)
+ break;
+ }
+
+ return false;
+ }
+
+ void HandleMemberExpr(MemberExpr *ME, bool CheckReferenceOnly,
+ bool AddressOf) {
if (isa<EnumConstantDecl>(ME->getMemberDecl()))
return;
@@ -2224,33 +2315,63 @@ namespace {
// or union.
MemberExpr *FieldME = ME;
+ bool AllPODFields = FieldME->getType().isPODType(S.Context);
+
Expr *Base = ME;
- while (isa<MemberExpr>(Base)) {
- ME = cast<MemberExpr>(Base);
+ while (MemberExpr *SubME =
+ dyn_cast<MemberExpr>(Base->IgnoreParenImpCasts())) {
- if (isa<VarDecl>(ME->getMemberDecl()))
+ if (isa<VarDecl>(SubME->getMemberDecl()))
return;
- if (FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(SubME->getMemberDecl()))
if (!FD->isAnonymousStructOrUnion())
- FieldME = ME;
+ FieldME = SubME;
+
+ if (!FieldME->getType().isPODType(S.Context))
+ AllPODFields = false;
- Base = ME->getBase();
+ Base = SubME->getBase();
}
- if (!isa<CXXThisExpr>(Base))
+ if (!isa<CXXThisExpr>(Base->IgnoreParenImpCasts()))
+ return;
+
+ if (AddressOf && AllPODFields)
return;
ValueDecl* FoundVD = FieldME->getMemberDecl();
+ if (ImplicitCastExpr *BaseCast = dyn_cast<ImplicitCastExpr>(Base)) {
+ while (isa<ImplicitCastExpr>(BaseCast->getSubExpr())) {
+ BaseCast = cast<ImplicitCastExpr>(BaseCast->getSubExpr());
+ }
+
+ if (BaseCast->getCastKind() == CK_UncheckedDerivedToBase) {
+ QualType T = BaseCast->getType();
+ if (T->isPointerType() &&
+ BaseClasses.count(T->getPointeeType())) {
+ S.Diag(FieldME->getExprLoc(), diag::warn_base_class_is_uninit)
+ << T->getPointeeType() << FoundVD;
+ }
+ }
+ }
+
if (!Decls.count(FoundVD))
return;
const bool IsReference = FoundVD->getType()->isReferenceType();
- // Prevent double warnings on use of unbounded references.
- if (IsReference != CheckReferenceOnly)
- return;
+ if (InitList && !AddressOf && FoundVD == InitListFieldDecl) {
+ // Special checking for initializer lists.
+ if (IsInitListMemberExprInitialized(ME, CheckReferenceOnly)) {
+ return;
+ }
+ } else {
+ // Prevent double warnings on use of unbounded references.
+ if (CheckReferenceOnly && !IsReference)
+ return;
+ }
unsigned diag = IsReference
? diag::warn_reference_field_is_uninit
@@ -2263,74 +2384,160 @@ namespace {
}
- void HandleValue(Expr *E) {
+ void HandleValue(Expr *E, bool AddressOf) {
E = E->IgnoreParens();
if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- HandleMemberExpr(ME, false /*CheckReferenceOnly*/);
+ HandleMemberExpr(ME, false /*CheckReferenceOnly*/,
+ AddressOf /*AddressOf*/);
return;
}
if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
- HandleValue(CO->getTrueExpr());
- HandleValue(CO->getFalseExpr());
+ Visit(CO->getCond());
+ HandleValue(CO->getTrueExpr(), AddressOf);
+ HandleValue(CO->getFalseExpr(), AddressOf);
return;
}
if (BinaryConditionalOperator *BCO =
dyn_cast<BinaryConditionalOperator>(E)) {
- HandleValue(BCO->getCommon());
- HandleValue(BCO->getFalseExpr());
+ Visit(BCO->getCond());
+ HandleValue(BCO->getFalseExpr(), AddressOf);
+ return;
+ }
+
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ HandleValue(OVE->getSourceExpr(), AddressOf);
return;
}
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
switch (BO->getOpcode()) {
default:
- return;
+ break;
case(BO_PtrMemD):
case(BO_PtrMemI):
- HandleValue(BO->getLHS());
+ HandleValue(BO->getLHS(), AddressOf);
+ Visit(BO->getRHS());
return;
case(BO_Comma):
- HandleValue(BO->getRHS());
+ Visit(BO->getLHS());
+ HandleValue(BO->getRHS(), AddressOf);
return;
}
}
+
+ Visit(E);
+ }
+
+ void CheckInitListExpr(InitListExpr *ILE) {
+ InitFieldIndex.push_back(0);
+ for (auto Child : ILE->children()) {
+ if (InitListExpr *SubList = dyn_cast<InitListExpr>(Child)) {
+ CheckInitListExpr(SubList);
+ } else {
+ Visit(Child);
+ }
+ ++InitFieldIndex.back();
+ }
+ InitFieldIndex.pop_back();
+ }
+
+ void CheckInitializer(Expr *E, const CXXConstructorDecl *FieldConstructor,
+ FieldDecl *Field, const Type *BaseClass) {
+ // Remove Decls that may have been initialized in the previous
+ // initializer.
+ for (ValueDecl* VD : DeclsToRemove)
+ Decls.erase(VD);
+ DeclsToRemove.clear();
+
+ Constructor = FieldConstructor;
+ InitListExpr *ILE = dyn_cast<InitListExpr>(E);
+
+ if (ILE && Field) {
+ InitList = true;
+ InitListFieldDecl = Field;
+ InitFieldIndex.clear();
+ CheckInitListExpr(ILE);
+ } else {
+ InitList = false;
+ Visit(E);
+ }
+
+ if (Field)
+ Decls.erase(Field);
+ if (BaseClass)
+ BaseClasses.erase(BaseClass->getCanonicalTypeInternal());
}
void VisitMemberExpr(MemberExpr *ME) {
// All uses of unbounded reference fields will warn.
- HandleMemberExpr(ME, true /*CheckReferenceOnly*/);
-
- Inherited::VisitMemberExpr(ME);
+ HandleMemberExpr(ME, true /*CheckReferenceOnly*/, false /*AddressOf*/);
}
void VisitImplicitCastExpr(ImplicitCastExpr *E) {
- if (E->getCastKind() == CK_LValueToRValue)
- HandleValue(E->getSubExpr());
+ if (E->getCastKind() == CK_LValueToRValue) {
+ HandleValue(E->getSubExpr(), false /*AddressOf*/);
+ return;
+ }
Inherited::VisitImplicitCastExpr(E);
}
void VisitCXXConstructExpr(CXXConstructExpr *E) {
- if (E->getConstructor()->isCopyConstructor())
- if (ImplicitCastExpr* ICE = dyn_cast<ImplicitCastExpr>(E->getArg(0)))
+ if (E->getConstructor()->isCopyConstructor()) {
+ Expr *ArgExpr = E->getArg(0);
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(ArgExpr))
+ if (ILE->getNumInits() == 1)
+ ArgExpr = ILE->getInit(0);
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
if (ICE->getCastKind() == CK_NoOp)
- if (MemberExpr *ME = dyn_cast<MemberExpr>(ICE->getSubExpr()))
- HandleMemberExpr(ME, false /*CheckReferenceOnly*/);
-
+ ArgExpr = ICE->getSubExpr();
+ HandleValue(ArgExpr, false /*AddressOf*/);
+ return;
+ }
Inherited::VisitCXXConstructExpr(E);
}
void VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
Expr *Callee = E->getCallee();
- if (isa<MemberExpr>(Callee))
- HandleValue(Callee);
+ if (isa<MemberExpr>(Callee)) {
+ HandleValue(Callee, false /*AddressOf*/);
+ for (auto Arg : E->arguments())
+ Visit(Arg);
+ return;
+ }
Inherited::VisitCXXMemberCallExpr(E);
}
+ void VisitCallExpr(CallExpr *E) {
+ // Treat std::move as a use.
+ if (E->getNumArgs() == 1) {
+ if (FunctionDecl *FD = E->getDirectCallee()) {
+ if (FD->isInStdNamespace() && FD->getIdentifier() &&
+ FD->getIdentifier()->isStr("move")) {
+ HandleValue(E->getArg(0), false /*AddressOf*/);
+ return;
+ }
+ }
+ }
+
+ Inherited::VisitCallExpr(E);
+ }
+
+ void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ Expr *Callee = E->getCallee();
+
+ if (isa<UnresolvedLookupExpr>(Callee))
+ return Inherited::VisitCXXOperatorCallExpr(E);
+
+ Visit(Callee);
+ for (auto Arg : E->arguments())
+ HandleValue(Arg->IgnoreParenImpCasts(), false /*AddressOf*/);
+ }
+
void VisitBinaryOperator(BinaryOperator *E) {
// If a field assignment is detected, remove the field from the
// uninitialized field set.
@@ -2338,30 +2545,32 @@ namespace {
if (MemberExpr *ME = dyn_cast<MemberExpr>(E->getLHS()))
if (FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
if (!FD->getType()->isReferenceType())
- Decls.erase(FD);
+ DeclsToRemove.push_back(FD);
+
+ if (E->isCompoundAssignmentOp()) {
+ HandleValue(E->getLHS(), false /*AddressOf*/);
+ Visit(E->getRHS());
+ return;
+ }
Inherited::VisitBinaryOperator(E);
}
- };
- static void CheckInitExprContainsUninitializedFields(
- Sema &S, Expr *E, llvm::SmallPtrSet<ValueDecl*, 4> &Decls,
- const CXXConstructorDecl *Constructor) {
- if (Decls.size() == 0)
- return;
-
- if (!E)
- return;
- if (CXXDefaultInitExpr *Default = dyn_cast<CXXDefaultInitExpr>(E)) {
- E = Default->getExpr();
- if (!E)
+ void VisitUnaryOperator(UnaryOperator *E) {
+ if (E->isIncrementDecrementOp()) {
+ HandleValue(E->getSubExpr(), false /*AddressOf*/);
return;
- // In class initializers will point to the constructor.
- UninitializedFieldVisitor(S, Decls, Constructor).Visit(E);
- } else {
- UninitializedFieldVisitor(S, Decls, nullptr).Visit(E);
+ }
+ if (E->getOpcode() == UO_AddrOf) {
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E->getSubExpr())) {
+ HandleValue(ME->getBase(), true /*AddressOf*/);
+ return;
+ }
+ }
+
+ Inherited::VisitUnaryOperator(E);
}
- }
+ };
// Diagnose value-uses of fields to initialize themselves, e.g.
// foo(foo)
@@ -2382,6 +2591,9 @@ namespace {
const CXXRecordDecl *RD = Constructor->getParent();
+ if (RD->getDescribedClassTemplate())
+ return;
+
// Holds fields that are uninitialized.
llvm::SmallPtrSet<ValueDecl*, 4> UninitializedFields;
@@ -2394,14 +2606,39 @@ namespace {
}
}
+ llvm::SmallPtrSet<QualType, 4> UninitializedBaseClasses;
+ for (auto I : RD->bases())
+ UninitializedBaseClasses.insert(I.getType().getCanonicalType());
+
+ if (UninitializedFields.empty() && UninitializedBaseClasses.empty())
+ return;
+
+ UninitializedFieldVisitor UninitializedChecker(SemaRef,
+ UninitializedFields,
+ UninitializedBaseClasses);
+
for (const auto *FieldInit : Constructor->inits()) {
- Expr *InitExpr = FieldInit->getInit();
+ if (UninitializedFields.empty() && UninitializedBaseClasses.empty())
+ break;
- CheckInitExprContainsUninitializedFields(
- SemaRef, InitExpr, UninitializedFields, Constructor);
+ Expr *InitExpr = FieldInit->getInit();
+ if (!InitExpr)
+ continue;
- if (FieldDecl *Field = FieldInit->getAnyMember())
- UninitializedFields.erase(Field);
+ if (CXXDefaultInitExpr *Default =
+ dyn_cast<CXXDefaultInitExpr>(InitExpr)) {
+ InitExpr = Default->getExpr();
+ if (!InitExpr)
+ continue;
+ // In-class initializers will point to the constructor.
+ UninitializedChecker.CheckInitializer(InitExpr, Constructor,
+ FieldInit->getAnyMember(),
+ FieldInit->getBaseClass());
+ } else {
+ UninitializedChecker.CheckInitializer(InitExpr, nullptr,
+ FieldInit->getAnyMember(),
+ FieldInit->getBaseClass());
+ }
}
}
} // namespace
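For illustration (an editor's sketch, not part of the patch): with the reworked visitor, moving from a not-yet-initialized member counts as a use, and uses of a not-yet-constructed base class are tracked as well, roughly:

    #include <string>
    #include <utility>

    struct S {
      std::string a, b;
      // Members are initialized in declaration order, so 'b' is still
      // uninitialized here; std::move is now treated as a use of it.
      S() : a(std::move(b)) {}
    };

    struct Base {
      Base(int);
      int get();
    };
    struct Derived : Base {
      // 'get' is called on the Base subobject before Base is constructed;
      // this is the pattern the new BaseClasses tracking targets.
      Derived() : Base(get()) {}
    };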
@@ -2424,13 +2661,14 @@ void Sema::ActOnFinishCXXInClassMemberInitializer(Decl *D,
// Pop the notional constructor scope we created earlier.
PopFunctionScopeInfo(nullptr, D);
- FieldDecl *FD = cast<FieldDecl>(D);
- assert(FD->getInClassInitStyle() != ICIS_NoInit &&
+ FieldDecl *FD = dyn_cast<FieldDecl>(D);
+ assert((isa<MSPropertyDecl>(D) || FD->getInClassInitStyle() != ICIS_NoInit) &&
"must set init style when field is created");
if (!InitExpr) {
- FD->setInvalidDecl();
- FD->removeInClassInitializer();
+ D->setInvalidDecl();
+ if (FD)
+ FD->removeInClassInitializer();
return;
}
@@ -2581,6 +2819,11 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc) {
+ ExprResult Res = CorrectDelayedTyposInExpr(Init);
+ if (!Res.isUsable())
+ return true;
+ Init = Res.get();
+
if (!ConstructorD)
return true;
@@ -2610,8 +2853,7 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
// using a qualified name. ]
if (!SS.getScopeRep() && !TemplateTypeTy) {
// Look for a member, first.
- DeclContext::lookup_result Result
- = ClassDecl->lookup(MemberOrBase);
+ DeclContext::lookup_result Result = ClassDecl->lookup(MemberOrBase);
if (!Result.empty()) {
ValueDecl *Member;
if ((Member = dyn_cast<FieldDecl>(Result.front())) ||
@@ -2666,10 +2908,11 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
// If no results were found, try to correct typos.
TypoCorrection Corr;
- MemInitializerValidatorCCC Validator(ClassDecl);
if (R.empty() && BaseType.isNull() &&
- (Corr = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
- Validator, CTK_ErrorRecovery, ClassDecl))) {
+ (Corr = CorrectTypo(
+ R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
+ llvm::make_unique<MemInitializerValidatorCCC>(ClassDecl),
+ CTK_ErrorRecovery, ClassDecl))) {
if (FieldDecl *Member = Corr.getCorrectionDeclAs<FieldDecl>()) {
// We have found a non-static data member with a similar
// name to what was typed; complain and initialize that
@@ -2713,6 +2956,7 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
if (BaseType.isNull()) {
BaseType = Context.getTypeDeclType(TyD);
+ MarkAnyDeclReferenced(TyD->getLocation(), TyD, /*OdrUse=*/false);
if (SS.isSet())
// FIXME: preserve source range information
BaseType = Context.getElaboratedType(ETK_None, SS.getScopeRep(),
@@ -3528,19 +3772,19 @@ static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
return false;
if (Field->hasInClassInitializer() && !Info.isImplicitCopyOrMove()) {
- Expr *DIE = CXXDefaultInitExpr::Create(SemaRef.Context,
- Info.Ctor->getLocation(), Field);
+ ExprResult DIE =
+ SemaRef.BuildCXXDefaultInitExpr(Info.Ctor->getLocation(), Field);
+ if (DIE.isInvalid())
+ return true;
CXXCtorInitializer *Init;
if (Indirect)
- Init = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Indirect,
- SourceLocation(),
- SourceLocation(), DIE,
- SourceLocation());
+ Init = new (SemaRef.Context)
+ CXXCtorInitializer(SemaRef.Context, Indirect, SourceLocation(),
+ SourceLocation(), DIE.get(), SourceLocation());
else
- Init = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Field,
- SourceLocation(),
- SourceLocation(), DIE,
- SourceLocation());
+ Init = new (SemaRef.Context)
+ CXXCtorInitializer(SemaRef.Context, Field, SourceLocation(),
+ SourceLocation(), DIE.get(), SourceLocation());
return Info.addFieldInitializer(Init);
}
@@ -3582,6 +3826,8 @@ Sema::SetDelegatingInitializer(CXXConstructorDecl *Constructor,
DelegatingCtorDecls.push_back(Constructor);
+ DiagnoseUninitializedFields(*this, Constructor);
+
return false;
}
@@ -4234,7 +4480,7 @@ void Sema::DiagnoseAbstractType(const CXXRecordDecl *RD) {
if (!SO->second.front().Method->isPure())
continue;
- if (!SeenPureMethods.insert(SO->second.front().Method))
+ if (!SeenPureMethods.insert(SO->second.front().Method).second)
continue;
Diag(SO->second.front().Method->getLocation(),
@@ -4415,10 +4661,53 @@ static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
/// \brief Check class-level dllimport/dllexport attribute.
static void checkDLLAttribute(Sema &S, CXXRecordDecl *Class) {
Attr *ClassAttr = getDLLAttr(Class);
+
+ // MSVC inherits DLL attributes to partial class template specializations.
+ if (S.Context.getTargetInfo().getCXXABI().isMicrosoft() && !ClassAttr) {
+ if (auto *Spec = dyn_cast<ClassTemplatePartialSpecializationDecl>(Class)) {
+ if (Attr *TemplateAttr =
+ getDLLAttr(Spec->getSpecializedTemplate()->getTemplatedDecl())) {
+ auto *A = cast<InheritableAttr>(TemplateAttr->clone(S.getASTContext()));
+ A->setInherited(true);
+ ClassAttr = A;
+ }
+ }
+ }
+
if (!ClassAttr)
return;
- bool ClassExported = ClassAttr->getKind() == attr::DLLExport;
+ if (!Class->isExternallyVisible()) {
+ S.Diag(Class->getLocation(), diag::err_attribute_dll_not_extern)
+ << Class << ClassAttr;
+ return;
+ }
+
+ if (S.Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ !ClassAttr->isInherited()) {
+ // Diagnose dll attributes on members of class with dll attribute.
+ for (Decl *Member : Class->decls()) {
+ if (!isa<VarDecl>(Member) && !isa<CXXMethodDecl>(Member))
+ continue;
+ InheritableAttr *MemberAttr = getDLLAttr(Member);
+ if (!MemberAttr || MemberAttr->isInherited() || Member->isInvalidDecl())
+ continue;
+
+ S.Diag(MemberAttr->getLocation(),
+ diag::err_attribute_dll_member_of_dll_class)
+ << MemberAttr << ClassAttr;
+ S.Diag(ClassAttr->getLocation(), diag::note_previous_attribute);
+ Member->setInvalidDecl();
+ }
+ }
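For illustration (editor's sketch, assuming Microsoft-compatible mode): the member loop above rejects dll attributes that are spelled directly on members of a class that already carries one, e.g.

    struct __declspec(dllexport) S {
      __declspec(dllexport) void f();  // error: attribute cannot be applied to
                                       // a member of a dll-attributed class
    };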
+
+ if (Class->getDescribedClassTemplate())
+ // Don't inherit dll attribute until the template is instantiated.
+ return;
+
+ // The class is either imported or exported.
+ const bool ClassExported = ClassAttr->getKind() == attr::DLLExport;
+ const bool ClassImported = !ClassExported;
// Force declaration of implicit members so they can inherit the attribute.
S.ForceDeclarationOfImplicitMembers(Class);
@@ -4426,6 +4715,9 @@ static void checkDLLAttribute(Sema &S, CXXRecordDecl *Class) {
// FIXME: MSVC's docs say all bases must be exportable, but this doesn't
// seem to be true in practice?
+ TemplateSpecializationKind TSK =
+ Class->getTemplateSpecializationKind();
+
for (Decl *Member : Class->decls()) {
VarDecl *VD = dyn_cast<VarDecl>(Member);
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member);
@@ -4434,50 +4726,57 @@ static void checkDLLAttribute(Sema &S, CXXRecordDecl *Class) {
if (!VD && !MD)
continue;
- // Don't process deleted methods.
- if (MD && MD->isDeleted())
- continue;
+ if (MD) {
+ // Don't process deleted methods.
+ if (MD->isDeleted())
+ continue;
- if (MD && MD->isMoveAssignmentOperator() && !ClassExported &&
- MD->isInlined()) {
- // Current MSVC versions don't export the move assignment operators, so
- // don't attempt to import them if we have a definition.
- continue;
- }
+ if (MD->isMoveAssignmentOperator() && ClassImported && MD->isInlined()) {
+ // Current MSVC versions don't export the move assignment operators, so
+ // don't attempt to import them if we have a definition.
+ continue;
+ }
- if (InheritableAttr *MemberAttr = getDLLAttr(Member)) {
- if (S.Context.getTargetInfo().getCXXABI().isMicrosoft() &&
- !MemberAttr->isInherited() && !ClassAttr->isInherited()) {
- S.Diag(MemberAttr->getLocation(),
- diag::err_attribute_dll_member_of_dll_class)
- << MemberAttr << ClassAttr;
- S.Diag(ClassAttr->getLocation(), diag::note_previous_attribute);
- Member->setInvalidDecl();
+ if (MD->isInlined() && ClassImported &&
+ !S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ // MinGW does not import inline functions.
continue;
}
- } else {
+ }
+
+ if (!getDLLAttr(Member)) {
auto *NewAttr =
cast<InheritableAttr>(ClassAttr->clone(S.getASTContext()));
NewAttr->setInherited(true);
Member->addAttr(NewAttr);
}
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member)) {
- if (ClassExported) {
- if (MD->isUserProvided()) {
- // Instantiate non-default methods.
- S.MarkFunctionReferenced(Class->getLocation(), MD);
- } else if (!MD->isTrivial() || MD->isExplicitlyDefaulted() ||
- MD->isCopyAssignmentOperator() ||
- MD->isMoveAssignmentOperator()) {
- // Instantiate non-trivial or explicitly defaulted methods, and the
- // copy assignment / move assignment operators.
- S.MarkFunctionReferenced(Class->getLocation(), MD);
- // Resolve its exception specification; CodeGen needs it.
- auto *FPT = MD->getType()->getAs<FunctionProtoType>();
- S.ResolveExceptionSpec(Class->getLocation(), FPT);
- S.ActOnFinishInlineMethodDef(MD);
- }
+ if (MD && ClassExported) {
+ if (MD->isUserProvided()) {
+ // Instantiate non-default class member functions ...
+
+ // ... except for certain kinds of template specializations.
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ continue;
+ if (TSK == TSK_ImplicitInstantiation && !ClassAttr->isInherited())
+ continue;
+
+ S.MarkFunctionReferenced(Class->getLocation(), MD);
+
+ // The function will be passed to the consumer when its definition is
+ // encountered.
+ } else if (!MD->isTrivial() || MD->isExplicitlyDefaulted() ||
+ MD->isCopyAssignmentOperator() ||
+ MD->isMoveAssignmentOperator()) {
+ // Synthesize and instantiate non-trivial implicit methods, explicitly
+ // defaulted methods, and the copy and move assignment operators. The
+ // latter are exported even if they are trivial, because the address of
+ // an operator can be taken and should compare equal across libraries.
+ S.MarkFunctionReferenced(Class->getLocation(), MD);
+
+ // There is no later point when we will see the definition of this
+ // function, so pass it to the consumer now.
+ S.Consumer.HandleTopLevelDecl(DeclGroupRef(MD));
}
}
}
@@ -4563,13 +4862,18 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
}
}
+ bool HasMethodWithOverrideControl = false,
+ HasOverridingMethodWithoutOverrideControl = false;
if (!Record->isDependentType()) {
for (auto *M : Record->methods()) {
// See if a method overloads virtual methods in a base
// class without overriding any.
if (!M->isStatic())
DiagnoseHiddenVirtualMethods(M);
-
+ if (M->hasAttr<OverrideAttr>())
+ HasMethodWithOverrideControl = true;
+ else if (M->size_overridden_methods() > 0)
+ HasOverridingMethodWithoutOverrideControl = true;
// Check whether the explicitly-defaulted special members are valid.
if (!M->isInvalidDecl() && M->isExplicitlyDefaulted())
CheckExplicitlyDefaultedSpecialMember(M);
@@ -4588,41 +4892,12 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
}
}
- // C++11 [dcl.constexpr]p8: A constexpr specifier for a non-static member
- // function that is not a constructor declares that member function to be
- // const. [...] The class of which that function is a member shall be
- // a literal type.
- //
- // If the class has virtual bases, any constexpr members will already have
- // been diagnosed by the checks performed on the member declaration, so
- // suppress this (less useful) diagnostic.
- //
- // We delay this until we know whether an explicitly-defaulted (or deleted)
- // destructor for the class is trivial.
- if (LangOpts.CPlusPlus11 && !Record->isDependentType() &&
- !Record->isLiteral() && !Record->getNumVBases()) {
- for (const auto *M : Record->methods()) {
- if (M->isConstexpr() && M->isInstance() && !isa<CXXConstructorDecl>(M)) {
- switch (Record->getTemplateSpecializationKind()) {
- case TSK_ImplicitInstantiation:
- case TSK_ExplicitInstantiationDeclaration:
- case TSK_ExplicitInstantiationDefinition:
- // If a template instantiates to a non-literal type, but its members
- // instantiate to constexpr functions, the template is technically
- // ill-formed, but we allow it for sanity.
- continue;
-
- case TSK_Undeclared:
- case TSK_ExplicitSpecialization:
- RequireLiteralType(M->getLocation(), Context.getRecordType(Record),
- diag::err_constexpr_method_non_literal);
- break;
- }
-
- // Only produce one error per class.
- break;
- }
- }
+ if (HasMethodWithOverrideControl &&
+ HasOverridingMethodWithoutOverrideControl) {
+ // At least one method has the 'override' control declared.
+ // Diagnose all other overridden methods which do not have 'override'
+ // specified on them.
+ for (auto *M : Record->methods())
+ DiagnoseAbsenceOfOverrideControl(M);
}
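For illustration (not part of the patch): once any member uses the 'override' control, every other overriding member that omits it is diagnosed via warn_function_marked_not_override_overriding, with a note at the overridden function, e.g.

    struct Base {
      virtual void f();
      virtual void g();
    };
    struct Derived : Base {
      void f() override;  // has the override control
      void g();           // overrides Base::g without 'override' -> warned here
    };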
// ms_struct is a request to use the same ABI rules as MSVC. Check
@@ -4723,7 +4998,7 @@ static bool defaultedSpecialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
case Sema::CXXCopyAssignment:
case Sema::CXXMoveAssignment:
- if (!S.getLangOpts().CPlusPlus1y)
+ if (!S.getLangOpts().CPlusPlus14)
return false;
// In C++1y, we need to perform overload resolution.
Ctor = false;
@@ -4818,8 +5093,8 @@ static FunctionProtoType::ExtProtoInfo getImplicitMethodEPI(Sema &S,
FunctionProtoType::ExtProtoInfo EPI;
// Build an exception specification pointing back at this member.
- EPI.ExceptionSpecType = EST_Unevaluated;
- EPI.ExceptionSpecDecl = MD;
+ EPI.ExceptionSpec.Type = EST_Unevaluated;
+ EPI.ExceptionSpec.SourceDecl = MD;
// Set the calling convention to the default for C++ instance methods.
EPI.ExtInfo = EPI.ExtInfo.withCallingConv(
@@ -4834,14 +5109,10 @@ void Sema::EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD)
return;
// Evaluate the exception specification.
- ImplicitExceptionSpecification ExceptSpec =
- computeImplicitExceptionSpec(*this, Loc, MD);
-
- FunctionProtoType::ExtProtoInfo EPI;
- ExceptSpec.getEPI(EPI);
+ auto ESI = computeImplicitExceptionSpec(*this, Loc, MD).getExceptionSpec();
// Update the type of the special member to use it.
- UpdateExceptionSpec(MD, EPI);
+ UpdateExceptionSpec(MD, ESI);
// A user-provided destructor can be defined outside the class. When that
// happens, be sure to update the exception specification on both
@@ -4849,7 +5120,7 @@ void Sema::EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD)
const FunctionProtoType *CanonicalFPT =
MD->getCanonicalDecl()->getType()->castAs<FunctionProtoType>();
if (CanonicalFPT->getExceptionSpecType() == EST_Unevaluated)
- UpdateExceptionSpec(MD->getCanonicalDecl(), EPI);
+ UpdateExceptionSpec(MD->getCanonicalDecl(), ESI);
}
void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
@@ -4911,7 +5182,7 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
// A defaulted special member cannot have cv-qualifiers.
if (Type->getTypeQuals()) {
Diag(MD->getLocation(), diag::err_defaulted_special_member_quals)
- << (CSM == CXXMoveAssignment) << getLangOpts().CPlusPlus1y;
+ << (CSM == CXXMoveAssignment) << getLangOpts().CPlusPlus14;
HadError = true;
}
}
@@ -4960,7 +5231,7 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
// destructors in C++1y), this is checked elsewhere.
bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, RD, CSM,
HasConstParam);
- if ((getLangOpts().CPlusPlus1y ? !isa<CXXDestructorDecl>(MD)
+ if ((getLangOpts().CPlusPlus14 ? !isa<CXXDestructorDecl>(MD)
: isa<CXXConstructorDecl>(MD)) &&
MD->isConstexpr() && !Constexpr &&
MD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
@@ -4995,10 +5266,10 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
// -- it is implicitly considered to have the same exception-specification
// as if it had been implicitly declared,
FunctionProtoType::ExtProtoInfo EPI = Type->getExtProtoInfo();
- EPI.ExceptionSpecType = EST_Unevaluated;
- EPI.ExceptionSpecDecl = MD;
+ EPI.ExceptionSpec.Type = EST_Unevaluated;
+ EPI.ExceptionSpec.SourceDecl = MD;
MD->setType(Context.getFunctionType(ReturnType,
- ArrayRef<QualType>(&ArgType,
+ llvm::makeArrayRef(&ArgType,
ExpectedParams),
EPI));
}
@@ -5026,11 +5297,18 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
/// C++11 [dcl.fct.def.default]p2.
void Sema::CheckExplicitlyDefaultedMemberExceptionSpec(
CXXMethodDecl *MD, const FunctionProtoType *SpecifiedType) {
+ // If the exception specification was explicitly specified but hadn't been
+ // parsed when the method was defaulted, grab it now.
+ if (SpecifiedType->getExceptionSpecType() == EST_Unparsed)
+ SpecifiedType =
+ MD->getTypeSourceInfo()->getType()->castAs<FunctionProtoType>();
+
// Compute the implicit exception specification.
CallingConv CC = Context.getDefaultCallingConvention(/*IsVariadic=*/false,
/*IsCXXMethod=*/true);
FunctionProtoType::ExtProtoInfo EPI(CC);
- computeImplicitExceptionSpec(*this, MD->getLocation(), MD).getEPI(EPI);
+ EPI.ExceptionSpec = computeImplicitExceptionSpec(*this, MD->getLocation(), MD)
+ .getExceptionSpec();
const FunctionProtoType *ImplicitType = cast<FunctionProtoType>(
Context.getFunctionType(Context.VoidTy, None, EPI));
@@ -5043,27 +5321,21 @@ void Sema::CheckExplicitlyDefaultedMemberExceptionSpec(
}
void Sema::CheckDelayedMemberExceptionSpecs() {
- SmallVector<std::pair<const CXXDestructorDecl *, const CXXDestructorDecl *>,
- 2> Checks;
- SmallVector<std::pair<CXXMethodDecl *, const FunctionProtoType *>, 2> Specs;
+ decltype(DelayedExceptionSpecChecks) Checks;
+ decltype(DelayedDefaultedMemberExceptionSpecs) Specs;
- std::swap(Checks, DelayedDestructorExceptionSpecChecks);
+ std::swap(Checks, DelayedExceptionSpecChecks);
std::swap(Specs, DelayedDefaultedMemberExceptionSpecs);
// Perform any deferred checking of exception specifications for virtual
// destructors.
- for (unsigned i = 0, e = Checks.size(); i != e; ++i) {
- const CXXDestructorDecl *Dtor = Checks[i].first;
- assert(!Dtor->getParent()->isDependentType() &&
- "Should not ever add destructors of templates into the list.");
- CheckOverridingFunctionExceptionSpec(Dtor, Checks[i].second);
- }
+ for (auto &Check : Checks)
+ CheckOverridingFunctionExceptionSpec(Check.first, Check.second);
// Check that any explicitly-defaulted methods have exception specifications
// compatible with their implicit exception specifications.
- for (unsigned I = 0, N = Specs.size(); I != N; ++I)
- CheckExplicitlyDefaultedMemberExceptionSpec(Specs[I].first,
- Specs[I].second);
+ for (auto &Spec : Specs)
+ CheckExplicitlyDefaultedMemberExceptionSpec(Spec.first, Spec.second);
}
namespace {
@@ -5491,6 +5763,13 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
if (SMI.shouldDeleteForAllConstMembers())
return true;
+ if (getLangOpts().CUDA) {
+ // We should delete the special member in CUDA mode if target inference
+ // failed.
+ return inferCUDATargetForImplicitSpecialMember(RD, CSM, MD, SMI.ConstArg,
+ Diagnose);
+ }
+
return false;
}
@@ -5907,7 +6186,7 @@ namespace {
/// \brief Check whether any most overridden method from MD is in Methods
static bool CheckMostOverridenMethods(const CXXMethodDecl *MD,
- const llvm::SmallPtrSet<const CXXMethodDecl *, 8>& Methods) {
+ const llvm::SmallPtrSetImpl<const CXXMethodDecl *>& Methods) {
if (MD->size_overridden_methods() == 0)
return Methods.count(MD->getCanonicalDecl());
for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
@@ -5945,7 +6224,14 @@ static bool FindHiddenVirtualMethod(const CXXBaseSpecifier *Specifier,
if (!MD->isVirtual())
continue;
// If the method we are checking overrides a method from its base
- // don't warn about the other overloaded methods.
+ // don't warn about the other overloaded methods. Clang deviates from GCC
+ // by only diagnosing overloads of inherited virtual functions that do not
+ // override any other virtual functions in the base. GCC's
+ // -Woverloaded-virtual diagnoses any derived function hiding a virtual
+ // function from a base class. These cases may be better served by a
+ // warning (not specific to virtual functions) on call sites when the call
+ // would select a different function from the base class, were it visible.
+ // See FIXME in test/SemaCXX/warn-overload-virtual.cpp for an example.
if (!Data.S->IsOverload(Data.Method, MD, false))
return true;
// Collect the overload only if it's hidden.
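For illustration of the deviation described above (editor's sketch):

    struct A {
      virtual void f();
      virtual void f(int);
    };
    struct B : A {
      void f();        // overrides A::f(); per the comment, Clang does not go
                       // on to warn that A::f(int) is hidden, while GCC's
                       // -Woverloaded-virtual would
    };
    struct C : A {
      void f(double);  // overrides nothing and hides both A::f overloads ->
                       // -Woverloaded-virtual still warns
    };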
@@ -5962,7 +6248,7 @@ static bool FindHiddenVirtualMethod(const CXXBaseSpecifier *Specifier,
/// \brief Add the most overridden methods from MD to Methods
static void AddMostOverridenMethods(const CXXMethodDecl *MD,
- llvm::SmallPtrSet<const CXXMethodDecl *, 8>& Methods) {
+ llvm::SmallPtrSetImpl<const CXXMethodDecl *>& Methods) {
if (MD->size_overridden_methods() == 0)
Methods.insert(MD->getCanonicalDecl());
for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
@@ -6515,6 +6801,22 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
return Context.getFunctionType(Context.VoidTy, None, EPI);
}
+static void extendLeft(SourceRange &R, const SourceRange &Before) {
+ if (Before.isInvalid())
+ return;
+ R.setBegin(Before.getBegin());
+ if (R.getEnd().isInvalid())
+ R.setEnd(Before.getEnd());
+}
+
+static void extendRight(SourceRange &R, const SourceRange &After) {
+ if (After.isInvalid())
+ return;
+ if (R.getBegin().isInvalid())
+ R.setBegin(After.getBegin());
+ R.setEnd(After.getEnd());
+}
+
/// CheckConversionDeclarator - Called by ActOnDeclarator to check the
/// well-formedness of the conversion function declarator @p D with
/// type @p R. If there are any errors in the declarator, this routine
@@ -6536,7 +6838,9 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
SC = SC_None;
}
- QualType ConvType = GetTypeFromParser(D.getName().ConversionFunctionId);
+ TypeSourceInfo *ConvTSI = nullptr;
+ QualType ConvType =
+ GetTypeFromParser(D.getName().ConversionFunctionId, &ConvTSI);
if (D.getDeclSpec().hasTypeSpecifier() && !D.isInvalidType()) {
// Conversion functions don't have return types, but the parser will
@@ -6570,9 +6874,75 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
// Diagnose "&operator bool()" and other such nonsense. This
// is actually a gcc extension which we don't support.
if (Proto->getReturnType() != ConvType) {
- Diag(D.getIdentifierLoc(), diag::err_conv_function_with_complex_decl)
- << Proto->getReturnType();
- D.setInvalidType();
+ bool NeedsTypedef = false;
+ SourceRange Before, After;
+
+ // Walk the chunks and extract information on them for our diagnostic.
+ bool PastFunctionChunk = false;
+ for (auto &Chunk : D.type_objects()) {
+ switch (Chunk.Kind) {
+ case DeclaratorChunk::Function:
+ if (!PastFunctionChunk) {
+ if (Chunk.Fun.HasTrailingReturnType) {
+ TypeSourceInfo *TRT = nullptr;
+ GetTypeFromParser(Chunk.Fun.getTrailingReturnType(), &TRT);
+ if (TRT) extendRight(After, TRT->getTypeLoc().getSourceRange());
+ }
+ PastFunctionChunk = true;
+ break;
+ }
+ // Fall through.
+ case DeclaratorChunk::Array:
+ NeedsTypedef = true;
+ extendRight(After, Chunk.getSourceRange());
+ break;
+
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ extendLeft(Before, Chunk.getSourceRange());
+ break;
+
+ case DeclaratorChunk::Paren:
+ extendLeft(Before, Chunk.Loc);
+ extendRight(After, Chunk.EndLoc);
+ break;
+ }
+ }
+
+ SourceLocation Loc = Before.isValid() ? Before.getBegin() :
+ After.isValid() ? After.getBegin() :
+ D.getIdentifierLoc();
+ auto &&DB = Diag(Loc, diag::err_conv_function_with_complex_decl);
+ DB << Before << After;
+
+ if (!NeedsTypedef) {
+ DB << /*don't need a typedef*/0;
+
+ // If we can provide a correct fix-it hint, do so.
+ if (After.isInvalid() && ConvTSI) {
+ SourceLocation InsertLoc =
+ PP.getLocForEndOfToken(ConvTSI->getTypeLoc().getLocEnd());
+ DB << FixItHint::CreateInsertion(InsertLoc, " ")
+ << FixItHint::CreateInsertionFromRange(
+ InsertLoc, CharSourceRange::getTokenRange(Before))
+ << FixItHint::CreateRemoval(Before);
+ }
+ } else if (!Proto->getReturnType()->isDependentType()) {
+ DB << /*typedef*/1 << Proto->getReturnType();
+ } else if (getLangOpts().CPlusPlus11) {
+ DB << /*alias template*/2 << Proto->getReturnType();
+ } else {
+ DB << /*might not be fixable*/3;
+ }
+
+ // Recover by incorporating the other type chunks into the result type.
+ // Note, this does *not* change the name of the function. This is compatible
+ // with the GCC extension:
+ // struct S { &operator int(); } s;
+ // int &r = s.operator int(); // ok in GCC
+ // S::operator int&() {} // error in GCC, function name is 'operator int'.
ConvType = Proto->getReturnType();
}
@@ -6893,7 +7263,7 @@ NamespaceDecl *Sema::getOrCreateStdNamespace() {
/*PrevDecl=*/nullptr);
getStdNamespace()->setImplicit(true);
}
-
+
return getStdNamespace();
}
@@ -7054,12 +7424,11 @@ static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident) {
- NamespaceValidatorCCC Validator;
R.clear();
- if (TypoCorrection Corrected = S.CorrectTypo(R.getLookupNameInfo(),
- R.getLookupKind(), Sc, &SS,
- Validator,
- Sema::CTK_ErrorRecovery)) {
+ if (TypoCorrection Corrected =
+ S.CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), Sc, &SS,
+ llvm::make_unique<NamespaceValidatorCCC>(),
+ Sema::CTK_ErrorRecovery)) {
if (DeclContext *DC = S.computeDeclContext(SS, false)) {
std::string CorrectedStr(Corrected.getAsString(S.getLangOpts()));
bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
@@ -7124,6 +7493,10 @@ Decl *Sema::ActOnUsingDirective(Scope *S,
NamedDecl *Named = R.getFoundDecl();
assert((isa<NamespaceDecl>(Named) || isa<NamespaceAliasDecl>(Named))
&& "expected namespace decl");
+
+ // The use of a nested name specifier may trigger deprecation warnings.
+ DiagnoseUseOfDecl(Named, IdentLoc);
+
// C++ [namespace.udir]p1:
// A using-directive specifies that the names in the nominated
// namespace can be used in the scope in which the
@@ -7685,11 +8058,12 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
// Try to correct typos if possible.
if (R.empty()) {
- UsingValidatorCCC CCC(HasTypenameKeyword, IsInstantiation, SS.getScopeRep(),
- dyn_cast<CXXRecordDecl>(CurContext));
- if (TypoCorrection Corrected = CorrectTypo(R.getLookupNameInfo(),
- R.getLookupKind(), S, &SS, CCC,
- CTK_ErrorRecovery)){
+ if (TypoCorrection Corrected = CorrectTypo(
+ R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
+ llvm::make_unique<UsingValidatorCCC>(
+ HasTypenameKeyword, IsInstantiation, SS.getScopeRep(),
+ dyn_cast<CXXRecordDecl>(CurContext)),
+ CTK_ErrorRecovery)) {
// We reject any correction for which ND would be NULL.
NamedDecl *ND = Corrected.getCorrectionDecl();
@@ -7874,7 +8248,7 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
// If we weren't able to compute a valid scope, it must be a
// dependent class scope.
if (!NamedContext || NamedContext->isRecord()) {
- auto *RD = dyn_cast<CXXRecordDecl>(NamedContext);
+ auto *RD = dyn_cast_or_null<CXXRecordDecl>(NamedContext);
if (RD && RequireCompleteDeclContext(const_cast<CXXScopeSpec&>(SS), RD))
RD = nullptr;
@@ -8164,6 +8538,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S,
TypeAliasTemplateDecl::Create(Context, CurContext, UsingLoc,
Name.Identifier, TemplateParams,
NewTD);
+ NewTD->setDescribedAliasTemplate(NewDecl);
NewDecl->setAccess(AS);
@@ -8185,57 +8560,65 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S,
return NewND;
}
-Decl *Sema::ActOnNamespaceAliasDef(Scope *S,
- SourceLocation NamespaceLoc,
- SourceLocation AliasLoc,
- IdentifierInfo *Alias,
- CXXScopeSpec &SS,
- SourceLocation IdentLoc,
- IdentifierInfo *Ident) {
+Decl *Sema::ActOnNamespaceAliasDef(Scope *S, SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias, CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *Ident) {
// Lookup the namespace name.
LookupResult R(*this, Ident, IdentLoc, LookupNamespaceName);
LookupParsedName(R, S, &SS);
+ if (R.isAmbiguous())
+ return nullptr;
+
+ if (R.empty()) {
+ if (!TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, Ident)) {
+ Diag(IdentLoc, diag::err_expected_namespace_name) << SS.getRange();
+ return nullptr;
+ }
+ }
+ assert(!R.isAmbiguous() && !R.empty());
+
// Check if we have a previous declaration with the same name.
- NamedDecl *PrevDecl
- = LookupSingleName(S, Alias, AliasLoc, LookupOrdinaryName,
- ForRedeclaration);
+ NamedDecl *PrevDecl = LookupSingleName(S, Alias, AliasLoc, LookupOrdinaryName,
+ ForRedeclaration);
if (PrevDecl && !isDeclInScope(PrevDecl, CurContext, S))
PrevDecl = nullptr;
+ NamedDecl *ND = R.getFoundDecl();
+
if (PrevDecl) {
if (NamespaceAliasDecl *AD = dyn_cast<NamespaceAliasDecl>(PrevDecl)) {
// We already have an alias with the same name that points to the same
- // namespace, so don't create a new one.
- // FIXME: At some point, we'll want to create the (redundant)
- // declaration to maintain better source information.
- if (!R.isAmbiguous() && !R.empty() &&
- AD->getNamespace()->Equals(getNamespaceDecl(R.getFoundDecl())))
+ // namespace; check that it matches.
+ if (!AD->getNamespace()->Equals(getNamespaceDecl(ND))) {
+ Diag(AliasLoc, diag::err_redefinition_different_namespace_alias)
+ << Alias;
+ Diag(PrevDecl->getLocation(), diag::note_previous_namespace_alias)
+ << AD->getNamespace();
return nullptr;
- }
-
- unsigned DiagID = isa<NamespaceDecl>(PrevDecl) ? diag::err_redefinition :
- diag::err_redefinition_different_kind;
- Diag(AliasLoc, DiagID) << Alias;
- Diag(PrevDecl->getLocation(), diag::note_previous_definition);
- return nullptr;
- }
-
- if (R.isAmbiguous())
- return nullptr;
-
- if (R.empty()) {
- if (!TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, Ident)) {
- Diag(IdentLoc, diag::err_expected_namespace_name) << SS.getRange();
+ }
+ } else {
+ unsigned DiagID = isa<NamespaceDecl>(PrevDecl)
+ ? diag::err_redefinition
+ : diag::err_redefinition_different_kind;
+ Diag(AliasLoc, DiagID) << Alias;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
return nullptr;
}
}
+ // The use of a nested name specifier may trigger deprecation warnings.
+ DiagnoseUseOfDecl(ND, IdentLoc);
+
NamespaceAliasDecl *AliasDecl =
NamespaceAliasDecl::Create(Context, CurContext, NamespaceLoc, AliasLoc,
Alias, SS.getWithLocInContext(Context),
- IdentLoc, R.getFoundDecl());
+ IdentLoc, ND);
+ if (PrevDecl)
+ AliasDecl->setPreviousDecl(cast<NamespaceAliasDecl>(PrevDecl));
PushOnScopeChains(AliasDecl, S);
return AliasDecl;
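For illustration (not from the patch): a redeclaration of a namespace alias with the same target is now kept and chained to the previous one, while retargeting it is an error, e.g.

    namespace N1 { }
    namespace N2 { }

    namespace A = N1;
    namespace A = N1;  // OK: same target, modeled as a redeclaration of 'A'
    namespace A = N2;  // error: redefinition of 'A' as an alias for a different
                       // namespace, with a note at the previous alias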
@@ -8285,22 +8668,6 @@ Sema::ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
if (F->hasInClassInitializer()) {
if (Expr *E = F->getInClassInitializer())
ExceptSpec.CalledExpr(E);
- else if (!F->isInvalidDecl())
- // DR1351:
- // If the brace-or-equal-initializer of a non-static data member
- // invokes a defaulted default constructor of its class or of an
- // enclosing class in a potentially evaluated subexpression, the
- // program is ill-formed.
- //
- // This resolution is unworkable: the exception specification of the
- // default constructor can be needed in an unevaluated context, in
- // particular, in the operand of a noexcept-expression, and we can be
- // unable to compute an exception specification for an enclosed class.
- //
- // We do not allow an in-class initializer to require the evaluation
- // of the exception specification for any in-class initializer whose
- // definition is not lexically complete.
- Diag(Loc, diag::err_in_class_initializer_references_def_ctor) << MD;
} else if (const RecordType *RecordTy
= Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
@@ -8368,9 +8735,6 @@ Sema::ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD) {
if (F->hasInClassInitializer()) {
if (Expr *E = F->getInClassInitializer())
ExceptSpec.CalledExpr(E);
- else if (!F->isInvalidDecl())
- Diag(CD->getLocation(),
- diag::err_in_class_initializer_references_def_ctor) << CD;
} else if (const RecordType *RecordTy
= Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
@@ -8392,7 +8756,7 @@ struct DeclaringSpecialMember {
DeclaringSpecialMember(Sema &S, CXXRecordDecl *RD, Sema::CXXSpecialMember CSM)
: S(S), D(RD, CSM) {
- WasAlreadyBeingDeclared = !S.SpecialMembersBeingDeclared.insert(D);
+ WasAlreadyBeingDeclared = !S.SpecialMembersBeingDeclared.insert(D).second;
if (WasAlreadyBeingDeclared)
// This almost never happens, but if it does, ensure that our cache
// doesn't contain a stale result.
@@ -8421,7 +8785,7 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
// user-declared constructor for class X, a default constructor is
// implicitly declared. An implicitly-declared default constructor
// is an inline public member of its class.
- assert(ClassDecl->needsImplicitDefaultConstructor() &&
+ assert(ClassDecl->needsImplicitDefaultConstructor() &&
"Should not build implicit default constructor!");
DeclaringSpecialMember DSM(*this, ClassDecl, CXXDefaultConstructor);
@@ -8445,7 +8809,13 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
/*isImplicitlyDeclared=*/true, Constexpr);
DefaultCon->setAccess(AS_public);
DefaultCon->setDefaulted();
- DefaultCon->setImplicit();
+
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDefaultConstructor,
+ DefaultCon,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
+ }
// Build an exception specification pointing back at this constructor.
FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, DefaultCon);
@@ -8488,6 +8858,11 @@ void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
return;
}
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ Constructor->getType()->castAs<FunctionProtoType>());
+
SourceLocation Loc = Constructor->getLocEnd().isValid()
? Constructor->getLocEnd()
: Constructor->getLocation();
@@ -8597,7 +8972,7 @@ private:
void inherit(const CXXConstructorDecl *Ctor) {
const FunctionProtoType *CtorType =
Ctor->getType()->castAs<FunctionProtoType>();
- ArrayRef<QualType> ArgTypes(CtorType->getParamTypes());
+ ArrayRef<QualType> ArgTypes = CtorType->getParamTypes();
FunctionProtoType::ExtProtoInfo EPI = CtorType->getExtProtoInfo();
SourceLocation UsingLoc = getUsingLoc(Ctor->getParent());
@@ -8737,8 +9112,8 @@ private:
// Build an unevaluated exception specification for this constructor.
const FunctionProtoType *FPT = DerivedType->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
- EPI.ExceptionSpecType = EST_Unevaluated;
- EPI.ExceptionSpecDecl = DerivedCtor;
+ EPI.ExceptionSpec.Type = EST_Unevaluated;
+ EPI.ExceptionSpec.SourceDecl = DerivedCtor;
DerivedCtor->setType(Context.getFunctionType(FPT->getReturnType(),
FPT->getParamTypes(), EPI));
@@ -8900,7 +9275,13 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
/*isImplicitlyDeclared=*/true);
Destructor->setAccess(AS_public);
Destructor->setDefaulted();
- Destructor->setImplicit();
+
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDestructor,
+ Destructor,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
+ }
// Build an exception specification pointing back at this destructor.
FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, Destructor);
@@ -8952,6 +9333,11 @@ void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
return;
}
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ Destructor->getType()->castAs<FunctionProtoType>());
+
SourceLocation Loc = Destructor->getLocEnd().isValid()
? Destructor->getLocEnd()
: Destructor->getLocation();
@@ -8971,7 +9357,7 @@ void Sema::ActOnFinishCXXMemberDecls() {
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(CurContext)) {
if (Record->isInvalidDecl()) {
DelayedDefaultedMemberExceptionSpecs.clear();
- DelayedDestructorExceptionSpecChecks.clear();
+ DelayedExceptionSpecChecks.clear();
return;
}
}
@@ -8995,8 +9381,8 @@ void Sema::AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
// the only thing of interest in the destructor type is its extended info.
// The return and arguments are fixed.
FunctionProtoType::ExtProtoInfo EPI = DtorType->getExtProtoInfo();
- EPI.ExceptionSpecType = EST_Unevaluated;
- EPI.ExceptionSpecDecl = Destructor;
+ EPI.ExceptionSpec.Type = EST_Unevaluated;
+ EPI.ExceptionSpec.SourceDecl = Destructor;
Destructor->setType(Context.getFunctionType(Context.VoidTy, None, EPI));
// FIXME: If the destructor has a body that could throw, and the newly created
@@ -9032,7 +9418,7 @@ class RefBuilder: public ExprBuilder {
QualType VarType;
public:
- virtual Expr *build(Sema &S, SourceLocation Loc) const override {
+ Expr *build(Sema &S, SourceLocation Loc) const override {
return assertNotNull(S.BuildDeclRefExpr(Var, VarType, VK_LValue, Loc).get());
}
@@ -9042,7 +9428,7 @@ public:
class ThisBuilder: public ExprBuilder {
public:
- virtual Expr *build(Sema &S, SourceLocation Loc) const override {
+ Expr *build(Sema &S, SourceLocation Loc) const override {
return assertNotNull(S.ActOnCXXThis(Loc).getAs<Expr>());
}
};
@@ -9054,7 +9440,7 @@ class CastBuilder: public ExprBuilder {
const CXXCastPath &Path;
public:
- virtual Expr *build(Sema &S, SourceLocation Loc) const override {
+ Expr *build(Sema &S, SourceLocation Loc) const override {
return assertNotNull(S.ImpCastExprToType(Builder.build(S, Loc), Type,
CK_UncheckedDerivedToBase, Kind,
&Path).get());
@@ -9069,7 +9455,7 @@ class DerefBuilder: public ExprBuilder {
const ExprBuilder &Builder;
public:
- virtual Expr *build(Sema &S, SourceLocation Loc) const override {
+ Expr *build(Sema &S, SourceLocation Loc) const override {
return assertNotNull(
S.CreateBuiltinUnaryOp(Loc, UO_Deref, Builder.build(S, Loc)).get());
}
@@ -9085,7 +9471,7 @@ class MemberBuilder: public ExprBuilder {
LookupResult &MemberLookup;
public:
- virtual Expr *build(Sema &S, SourceLocation Loc) const override {
+ Expr *build(Sema &S, SourceLocation Loc) const override {
return assertNotNull(S.BuildMemberReferenceExpr(
Builder.build(S, Loc), Type, Loc, IsArrow, SS, SourceLocation(),
nullptr, MemberLookup, nullptr).get());
@@ -9101,7 +9487,7 @@ class MoveCastBuilder: public ExprBuilder {
const ExprBuilder &Builder;
public:
- virtual Expr *build(Sema &S, SourceLocation Loc) const override {
+ Expr *build(Sema &S, SourceLocation Loc) const override {
return assertNotNull(CastForMoving(S, Builder.build(S, Loc)));
}
@@ -9112,7 +9498,7 @@ class LvalueConvBuilder: public ExprBuilder {
const ExprBuilder &Builder;
public:
- virtual Expr *build(Sema &S, SourceLocation Loc) const override {
+ Expr *build(Sema &S, SourceLocation Loc) const override {
return assertNotNull(
S.DefaultLvalueConversion(Builder.build(S, Loc)).get());
}
@@ -9125,7 +9511,7 @@ class SubscriptBuilder: public ExprBuilder {
const ExprBuilder &Index;
public:
- virtual Expr *build(Sema &S, SourceLocation Loc) const override {
+ Expr *build(Sema &S, SourceLocation Loc) const override {
return assertNotNull(S.CreateBuiltinArraySubscriptExpr(
Base.build(S, Loc), Loc, Index.build(S, Loc), Loc).get());
}
@@ -9521,6 +9907,13 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
CopyAssignment->setDefaulted();
CopyAssignment->setImplicit();
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyAssignment,
+ CopyAssignment,
+ /* ConstRHS */ Const,
+ /* Diagnose */ false);
+ }
+
// Build an exception specification pointing back at this member.
FunctionProtoType::ExtProtoInfo EPI =
getImplicitMethodEPI(*this, CopyAssignment);
@@ -9795,6 +10188,11 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
}
}
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ CopyAssignOperator->getType()->castAs<FunctionProtoType>());
+
if (Invalid) {
CopyAssignOperator->setInvalidDecl();
return;
@@ -9898,6 +10296,13 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
MoveAssignment->setDefaulted();
MoveAssignment->setImplicit();
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveAssignment,
+ MoveAssignment,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
+ }
+
// Build an exception specification pointing back at this member.
FunctionProtoType::ExtProtoInfo EPI =
getImplicitMethodEPI(*this, MoveAssignment);
@@ -10217,6 +10622,11 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
}
}
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ MoveAssignOperator->getType()->castAs<FunctionProtoType>());
+
if (Invalid) {
MoveAssignOperator->setInvalidDecl();
return;
@@ -10319,6 +10729,13 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
CopyConstructor->setAccess(AS_public);
CopyConstructor->setDefaulted();
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyConstructor,
+ CopyConstructor,
+ /* ConstRHS */ Const,
+ /* Diagnose */ false);
+ }
+
// Build an exception specification pointing back at this member.
FunctionProtoType::ExtProtoInfo EPI =
getImplicitMethodEPI(*this, CopyConstructor);
@@ -10386,6 +10803,11 @@ void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
ActOnCompoundStmt(Loc, Loc, None, /*isStmtExpr=*/false).getAs<Stmt>());
}
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ CopyConstructor->getType()->castAs<FunctionProtoType>());
+
CopyConstructor->markUsed(Context);
MarkVTableUsed(CurrentLocation, ClassDecl);
@@ -10484,6 +10906,13 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
MoveConstructor->setAccess(AS_public);
MoveConstructor->setDefaulted();
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveConstructor,
+ MoveConstructor,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
+ }
+
// Build an exception specification pointing back at this member.
FunctionProtoType::ExtProtoInfo EPI =
getImplicitMethodEPI(*this, MoveConstructor);
@@ -10546,6 +10975,11 @@ void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
Loc, Loc, None, /*isStmtExpr=*/ false).getAs<Stmt>());
}
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ MoveConstructor->getType()->castAs<FunctionProtoType>());
+
MoveConstructor->markUsed(Context);
MarkVTableUsed(CurrentLocation, ClassDecl);
@@ -10765,6 +11199,56 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
ParenRange);
}
+ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
+ assert(Field->hasInClassInitializer());
+
+ // If we already have the in-class initializer, nothing needs to be done.
+ if (Field->getInClassInitializer())
+ return CXXDefaultInitExpr::Create(Context, Loc, Field);
+
+ // Maybe we haven't instantiated the in-class initializer. Go check the
+ // pattern FieldDecl to see if it has one.
+ CXXRecordDecl *ParentRD = cast<CXXRecordDecl>(Field->getParent());
+
+ if (isTemplateInstantiation(ParentRD->getTemplateSpecializationKind())) {
+ CXXRecordDecl *ClassPattern = ParentRD->getTemplateInstantiationPattern();
+ DeclContext::lookup_result Lookup =
+ ClassPattern->lookup(Field->getDeclName());
+ assert(Lookup.size() == 1);
+ FieldDecl *Pattern = cast<FieldDecl>(Lookup[0]);
+ if (InstantiateInClassInitializer(Loc, Field, Pattern,
+ getTemplateInstantiationArgs(Field)))
+ return ExprError();
+ return CXXDefaultInitExpr::Create(Context, Loc, Field);
+ }
+
+ // DR1351:
+ // If the brace-or-equal-initializer of a non-static data member
+ // invokes a defaulted default constructor of its class or of an
+ // enclosing class in a potentially evaluated subexpression, the
+ // program is ill-formed.
+ //
+ // This resolution is unworkable: the exception specification of the
+ // default constructor can be needed in an unevaluated context, in
+ // particular, in the operand of a noexcept-expression, and we can be
+ // unable to compute an exception specification for an enclosed class.
+ //
+ // Any attempt to resolve the exception specification of a defaulted default
+ // constructor before the initializer is lexically complete will ultimately
+ // come here, at which point we can diagnose it.
+ RecordDecl *OutermostClass = ParentRD->getOuterLexicalRecordContext();
+ if (OutermostClass == ParentRD) {
+ Diag(Field->getLocEnd(), diag::err_in_class_initializer_not_yet_parsed)
+ << ParentRD << Field;
+ } else {
+ Diag(Field->getLocEnd(),
+ diag::err_in_class_initializer_not_yet_parsed_outer_class)
+ << ParentRD << OutermostClass << Field;
+ }
+
+ return ExprError();
+}
+
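For illustration (editor's sketch): the instantiation branch of BuildCXXDefaultInitExpr covers default member initializers that so far exist only in a template pattern, roughly:

    template <typename T>
    struct Box {
      T value = T();   // pattern in-class initializer
    };

    // Using Box<int>'s implicit default constructor needs the initializer for
    // 'value'; it is instantiated from the pattern on demand before the
    // CXXDefaultInitExpr is created.
    Box<int> b;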
void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
if (VD->isInvalidDecl()) return;
@@ -10834,8 +11318,7 @@ Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
DiagnoseSentinelCalls(Constructor, Loc, AllArgs);
CheckConstructorCall(Constructor,
- llvm::makeArrayRef<const Expr *>(AllArgs.data(),
- AllArgs.size()),
+ llvm::makeArrayRef(AllArgs.data(), AllArgs.size()),
Proto, Loc);
return Invalid;
@@ -12118,8 +12601,12 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
}
// Mark templated-scope function declarations as unsupported.
- if (FD->getNumTemplateParameterLists())
+ if (FD->getNumTemplateParameterLists() && SS.isValid()) {
+ Diag(FD->getLocation(), diag::warn_template_qualified_friend_unsupported)
+ << SS.getScopeRep() << SS.getRange()
+ << cast<CXXRecordDecl>(CurContext);
FrD->setUnsupportedFriend(true);
+ }
}
return ND;
@@ -12220,11 +12707,6 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
CheckExplicitlyDefaultedSpecialMember(MD);
- // The exception specification is needed because we are defining the
- // function.
- ResolveExceptionSpec(DefaultLoc,
- MD->getType()->castAs<FunctionProtoType>());
-
if (MD->isInvalidDecl())
return;
@@ -12538,7 +13020,7 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
Pos = VTablesUsed.insert(std::make_pair(Class, DefinitionRequired));
if (!Pos.second) {
// If we already had an entry, check to see if we are promoting this vtable
- // to required a definition. If so, we need to reappend to the VTableUses
+ // to require a definition. If so, we need to reappend to the VTableUses
// list, since we may have already processed the first entry.
if (DefinitionRequired && !Pos.first->second) {
Pos.first->second = true;
@@ -12739,10 +13221,10 @@ void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
AllToInit.push_back(Member);
// Be sure that the destructor is accessible and is marked as referenced.
- if (const RecordType *RecordTy
- = Context.getBaseElementType(Field->getType())
- ->getAs<RecordType>()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (const RecordType *RecordTy =
+ Context.getBaseElementType(Field->getType())
+ ->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) {
MarkFunctionReferenced(Field->getLocation(), Destructor);
CheckDestructorAccess(Field->getLocation(), Destructor,
@@ -12780,7 +13262,7 @@ void DelegatingCycleHelper(CXXConstructorDecl* Ctor,
// Avoid dereferencing a null pointer here.
*TCanonical = Target? Target->getCanonicalDecl() : nullptr;
- if (!Current.insert(Canonical))
+ if (!Current.insert(Canonical).second)
return;
// We know that beyond here, we aren't chaining into a cycle.
@@ -12898,6 +13380,7 @@ bool Sema::checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method) {
FindCXXThisExpr Finder(*this);
switch (Proto->getExceptionSpecType()) {
+ case EST_Unparsed:
case EST_Uninstantiated:
case EST_Unevaluated:
case EST_BasicNoexcept:
@@ -12934,27 +13417,27 @@ bool Sema::checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method) {
else if (const auto *G = dyn_cast<PtGuardedByAttr>(A))
Arg = G->getArg();
else if (const auto *AA = dyn_cast<AcquiredAfterAttr>(A))
- Args = ArrayRef<Expr *>(AA->args_begin(), AA->args_size());
+ Args = llvm::makeArrayRef(AA->args_begin(), AA->args_size());
else if (const auto *AB = dyn_cast<AcquiredBeforeAttr>(A))
- Args = ArrayRef<Expr *>(AB->args_begin(), AB->args_size());
+ Args = llvm::makeArrayRef(AB->args_begin(), AB->args_size());
else if (const auto *ETLF = dyn_cast<ExclusiveTrylockFunctionAttr>(A)) {
Arg = ETLF->getSuccessValue();
- Args = ArrayRef<Expr *>(ETLF->args_begin(), ETLF->args_size());
+ Args = llvm::makeArrayRef(ETLF->args_begin(), ETLF->args_size());
} else if (const auto *STLF = dyn_cast<SharedTrylockFunctionAttr>(A)) {
Arg = STLF->getSuccessValue();
- Args = ArrayRef<Expr *>(STLF->args_begin(), STLF->args_size());
+ Args = llvm::makeArrayRef(STLF->args_begin(), STLF->args_size());
} else if (const auto *LR = dyn_cast<LockReturnedAttr>(A))
Arg = LR->getArg();
else if (const auto *LE = dyn_cast<LocksExcludedAttr>(A))
- Args = ArrayRef<Expr *>(LE->args_begin(), LE->args_size());
+ Args = llvm::makeArrayRef(LE->args_begin(), LE->args_size());
else if (const auto *RC = dyn_cast<RequiresCapabilityAttr>(A))
- Args = ArrayRef<Expr *>(RC->args_begin(), RC->args_size());
+ Args = llvm::makeArrayRef(RC->args_begin(), RC->args_size());
else if (const auto *AC = dyn_cast<AcquireCapabilityAttr>(A))
- Args = ArrayRef<Expr *>(AC->args_begin(), AC->args_size());
+ Args = llvm::makeArrayRef(AC->args_begin(), AC->args_size());
else if (const auto *AC = dyn_cast<TryAcquireCapabilityAttr>(A))
- Args = ArrayRef<Expr *>(AC->args_begin(), AC->args_size());
+ Args = llvm::makeArrayRef(AC->args_begin(), AC->args_size());
else if (const auto *RC = dyn_cast<ReleaseCapabilityAttr>(A))
- Args = ArrayRef<Expr *>(RC->args_begin(), RC->args_size());
+ Args = llvm::makeArrayRef(RC->args_begin(), RC->args_size());
if (Arg && !Finder.TraverseStmt(Arg))
return true;
@@ -12968,28 +13451,29 @@ bool Sema::checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method) {
return false;
}
-void
-Sema::checkExceptionSpecification(ExceptionSpecificationType EST,
- ArrayRef<ParsedType> DynamicExceptions,
- ArrayRef<SourceRange> DynamicExceptionRanges,
- Expr *NoexceptExpr,
- SmallVectorImpl<QualType> &Exceptions,
- FunctionProtoType::ExtProtoInfo &EPI) {
+void Sema::checkExceptionSpecification(
+ bool IsTopLevel, ExceptionSpecificationType EST,
+ ArrayRef<ParsedType> DynamicExceptions,
+ ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr,
+ SmallVectorImpl<QualType> &Exceptions,
+ FunctionProtoType::ExceptionSpecInfo &ESI) {
Exceptions.clear();
- EPI.ExceptionSpecType = EST;
+ ESI.Type = EST;
if (EST == EST_Dynamic) {
Exceptions.reserve(DynamicExceptions.size());
for (unsigned ei = 0, ee = DynamicExceptions.size(); ei != ee; ++ei) {
// FIXME: Preserve type source info.
QualType ET = GetTypeFromParser(DynamicExceptions[ei]);
- SmallVector<UnexpandedParameterPack, 2> Unexpanded;
- collectUnexpandedParameterPacks(ET, Unexpanded);
- if (!Unexpanded.empty()) {
- DiagnoseUnexpandedParameterPacks(DynamicExceptionRanges[ei].getBegin(),
- UPPC_ExceptionType,
- Unexpanded);
- continue;
+ if (IsTopLevel) {
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ collectUnexpandedParameterPacks(ET, Unexpanded);
+ if (!Unexpanded.empty()) {
+ DiagnoseUnexpandedParameterPacks(
+ DynamicExceptionRanges[ei].getBegin(), UPPC_ExceptionType,
+ Unexpanded);
+ continue;
+ }
}
// Check that the type is valid for an exception spec, and
@@ -12997,11 +13481,10 @@ Sema::checkExceptionSpecification(ExceptionSpecificationType EST,
if (!CheckSpecifiedExceptionType(ET, DynamicExceptionRanges[ei]))
Exceptions.push_back(ET);
}
- EPI.NumExceptions = Exceptions.size();
- EPI.Exceptions = Exceptions.data();
+ ESI.Exceptions = Exceptions;
return;
}
-
+
if (EST == EST_ComputedNoexcept) {
// If an error occurred, there's no expression here.
if (NoexceptExpr) {
@@ -13009,59 +13492,59 @@ Sema::checkExceptionSpecification(ExceptionSpecificationType EST,
NoexceptExpr->getType()->getCanonicalTypeUnqualified() ==
Context.BoolTy) &&
"Parser should have made sure that the expression is boolean");
- if (NoexceptExpr && DiagnoseUnexpandedParameterPack(NoexceptExpr)) {
- EPI.ExceptionSpecType = EST_BasicNoexcept;
+ if (IsTopLevel && NoexceptExpr &&
+ DiagnoseUnexpandedParameterPack(NoexceptExpr)) {
+ ESI.Type = EST_BasicNoexcept;
return;
}
-
+
if (!NoexceptExpr->isValueDependent())
NoexceptExpr = VerifyIntegerConstantExpression(NoexceptExpr, nullptr,
diag::err_noexcept_needs_constant_expression,
/*AllowFold*/ false).get();
- EPI.NoexceptExpr = NoexceptExpr;
+ ESI.NoexceptExpr = NoexceptExpr;
}
return;
}
}
-/// IdentifyCUDATarget - Determine the CUDA compilation target for this function
-Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) {
- // Implicitly declared functions (e.g. copy constructors) are
- // __host__ __device__
- if (D->isImplicit())
- return CFT_HostDevice;
-
- if (D->hasAttr<CUDAGlobalAttr>())
- return CFT_Global;
+void Sema::actOnDelayedExceptionSpecification(Decl *MethodD,
+ ExceptionSpecificationType EST,
+ SourceRange SpecificationRange,
+ ArrayRef<ParsedType> DynamicExceptions,
+ ArrayRef<SourceRange> DynamicExceptionRanges,
+ Expr *NoexceptExpr) {
+ if (!MethodD)
+ return;
- if (D->hasAttr<CUDADeviceAttr>()) {
- if (D->hasAttr<CUDAHostAttr>())
- return CFT_HostDevice;
- return CFT_Device;
- }
+ // Dig out the method we're referring to.
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(MethodD))
+ MethodD = FunTmpl->getTemplatedDecl();
- return CFT_Host;
-}
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(MethodD);
+ if (!Method)
+ return;
-bool Sema::CheckCUDATarget(CUDAFunctionTarget CallerTarget,
- CUDAFunctionTarget CalleeTarget) {
- // CUDA B.1.1 "The __device__ qualifier declares a function that is...
- // Callable from the device only."
- if (CallerTarget == CFT_Host && CalleeTarget == CFT_Device)
- return true;
+ // Check the exception specification.
+ llvm::SmallVector<QualType, 4> Exceptions;
+ FunctionProtoType::ExceptionSpecInfo ESI;
+ checkExceptionSpecification(/*IsTopLevel*/true, EST, DynamicExceptions,
+ DynamicExceptionRanges, NoexceptExpr, Exceptions,
+ ESI);
- // CUDA B.1.2 "The __global__ qualifier declares a function that is...
- // Callable from the host only."
- // CUDA B.1.3 "The __host__ qualifier declares a function that is...
- // Callable from the host only."
- if ((CallerTarget == CFT_Device || CallerTarget == CFT_Global) &&
- (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global))
- return true;
+ // Update the exception specification on the function type.
+ Context.adjustExceptionSpec(Method, ESI, /*AsWritten*/true);
- if (CallerTarget == CFT_HostDevice && CalleeTarget != CFT_HostDevice)
- return true;
+ if (Method->isStatic())
+ checkThisInStaticMemberFunctionExceptionSpec(Method);
- return false;
+ if (Method->isVirtual()) {
+ // Check overrides, which we previously had to delay.
+ for (CXXMethodDecl::method_iterator O = Method->begin_overridden_methods(),
+ OEnd = Method->end_overridden_methods();
+ O != OEnd; ++O)
+ CheckOverridingFunctionExceptionSpec(Method, *O);
+ }
}
/// HandleMSProperty - Analyze a __declspec(property) field of a C++ class.
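For context on the EST_Unparsed plumbing and actOnDelayedExceptionSpecification above: an exception specification written inside a class body is a complete-class context in C++11, so it may refer to members declared later in the class; the new code parses such specifications only once the class is complete. A minimal sketch (hypothetical code, not from the patch):

    struct Widget {
      // 'impl_' is declared below this function; because the noexcept operand
      // is analyzed only after Widget is complete, the forward reference is fine.
      void reset() noexcept(noexcept(impl_.clear())) { impl_.clear(); }

      struct Impl {
        void clear() noexcept {}
      } impl_;
    };

    static_assert(noexcept(Widget().reset()), "reset inherits noexcept from clear");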
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index b5205b3e6238..7e3da941b339 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -118,10 +118,7 @@ void Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
// a suitable return type, but the new (overriding) method does not have
// a suitable return type.
QualType ResultType = NewMethod->getReturnType();
- SourceRange ResultTypeRange;
- if (const TypeSourceInfo *ResultTypeInfo =
- NewMethod->getReturnTypeSourceInfo())
- ResultTypeRange = ResultTypeInfo->getTypeLoc().getSourceRange();
+ SourceRange ResultTypeRange = NewMethod->getReturnTypeSourceRange();
// Figure out which class this method is part of, if any.
ObjCInterfaceDecl *CurrentClass
@@ -204,15 +201,13 @@ bool Sema::CheckARCMethodDecl(ObjCMethodDecl *method) {
case OMF_autorelease:
case OMF_retainCount:
case OMF_self:
+ case OMF_initialize:
case OMF_performSelector:
return false;
case OMF_dealloc:
if (!Context.hasSameType(method->getReturnType(), Context.VoidTy)) {
- SourceRange ResultTypeRange;
- if (const TypeSourceInfo *ResultTypeInfo =
- method->getReturnTypeSourceInfo())
- ResultTypeRange = ResultTypeInfo->getTypeLoc().getSourceRange();
+ SourceRange ResultTypeRange = method->getReturnTypeSourceRange();
if (ResultTypeRange.isInvalid())
Diag(method->getLocation(), diag::error_dealloc_bad_result_type)
<< method->getReturnType()
@@ -359,6 +354,7 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
case OMF_copy:
case OMF_new:
case OMF_self:
+ case OMF_initialize:
case OMF_performSelector:
break;
}
@@ -520,10 +516,11 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
if (!PrevDecl) {
// Try to correct for a typo in the superclass name without correcting
// to the class we're defining.
- ObjCInterfaceValidatorCCC Validator(IDecl);
- if (TypoCorrection Corrected = CorrectTypo(
- DeclarationNameInfo(SuperName, SuperLoc), LookupOrdinaryName, TUScope,
- nullptr, Validator, CTK_ErrorRecovery)) {
+ if (TypoCorrection Corrected =
+ CorrectTypo(DeclarationNameInfo(SuperName, SuperLoc),
+ LookupOrdinaryName, TUScope, nullptr,
+ llvm::make_unique<ObjCInterfaceValidatorCCC>(IDecl),
+ CTK_ErrorRecovery)) {
diagnoseTypo(Corrected, PDiag(diag::err_undef_superclass_suggest)
<< SuperName << ClassName);
PrevDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>();
@@ -790,10 +787,10 @@ Sema::FindProtocolDeclaration(bool WarnOnDeclarations,
ObjCProtocolDecl *PDecl = LookupProtocol(ProtocolId[i].first,
ProtocolId[i].second);
if (!PDecl) {
- DeclFilterCCC<ObjCProtocolDecl> Validator;
TypoCorrection Corrected = CorrectTypo(
DeclarationNameInfo(ProtocolId[i].first, ProtocolId[i].second),
- LookupObjCProtocolName, TUScope, nullptr, Validator,
+ LookupObjCProtocolName, TUScope, nullptr,
+ llvm::make_unique<DeclFilterCCC<ObjCProtocolDecl>>(),
CTK_ErrorRecovery);
if ((PDecl = Corrected.getCorrectionDeclAs<ObjCProtocolDecl>()))
diagnoseTypo(Corrected, PDiag(diag::err_undeclared_protocol_suggest)
@@ -1031,11 +1028,9 @@ Decl *Sema::ActOnStartClassImplementation(
} else {
// We did not find anything with the name ClassName; try to correct for
// typos in the class name.
- ObjCInterfaceValidatorCCC Validator;
- TypoCorrection Corrected =
- CorrectTypo(DeclarationNameInfo(ClassName, ClassLoc),
- LookupOrdinaryName, TUScope, nullptr, Validator,
- CTK_NonError);
+ TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(ClassName, ClassLoc), LookupOrdinaryName, TUScope,
+ nullptr, llvm::make_unique<ObjCInterfaceValidatorCCC>(), CTK_NonError);
if (Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
// Suggest the (potentially) correct interface name. Don't provide a
// code-modification hint or use the typo name for recovery, because
@@ -1362,9 +1357,9 @@ static bool CheckMethodOverrideReturn(Sema &S,
? diag::warn_conflicting_overriding_ret_type_modifiers
: diag::warn_conflicting_ret_type_modifiers))
<< MethodImpl->getDeclName()
- << getTypeRange(MethodImpl->getReturnTypeSourceInfo());
+ << MethodImpl->getReturnTypeSourceRange();
S.Diag(MethodDecl->getLocation(), diag::note_previous_declaration)
- << getTypeRange(MethodDecl->getReturnTypeSourceInfo());
+ << MethodDecl->getReturnTypeSourceRange();
}
else
return false;
@@ -1402,11 +1397,11 @@ static bool CheckMethodOverrideReturn(Sema &S,
S.Diag(MethodImpl->getLocation(), DiagID)
<< MethodImpl->getDeclName() << MethodDecl->getReturnType()
<< MethodImpl->getReturnType()
- << getTypeRange(MethodImpl->getReturnTypeSourceInfo());
+ << MethodImpl->getReturnTypeSourceRange();
S.Diag(MethodDecl->getLocation(), IsOverridingMode
? diag::note_previous_declaration
: diag::note_previous_definition)
- << getTypeRange(MethodDecl->getReturnTypeSourceInfo());
+ << MethodDecl->getReturnTypeSourceRange();
return false;
}
@@ -1521,6 +1516,7 @@ static bool checkMethodFamilyMismatch(Sema &S, ObjCMethodDecl *impl,
case OMF_finalize:
case OMF_retainCount:
case OMF_self:
+ case OMF_initialize:
case OMF_performSelector:
// Mismatches for these methods don't change ownership
// conventions, so we don't care.
@@ -1819,7 +1815,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
// Check and see if instance methods in class interface have been
// implemented in the implementation class. If so, their types match.
for (auto *I : CDecl->instance_methods()) {
- if (!InsMapSeen.insert(I->getSelector()))
+ if (!InsMapSeen.insert(I->getSelector()).second)
continue;
if (!I->isPropertyAccessor() &&
!InsMap.count(I->getSelector())) {
@@ -1846,7 +1842,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
// Check and see if class methods in class interface have been
// implemented in the implementation class. If so, their types match.
for (auto *I : CDecl->class_methods()) {
- if (!ClsMapSeen.insert(I->getSelector()))
+ if (!ClsMapSeen.insert(I->getSelector()).second)
continue;
if (!ClsMap.count(I->getSelector())) {
if (ImmediateClass)
@@ -2008,13 +2004,12 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
IncompleteImpl, InsMap, ClsMap, CDecl,
ExplicitImplProtocols);
DiagnoseUnimplementedProperties(S, IMPDecl, CDecl,
- /* SynthesizeProperties */ false);
+ /*SynthesizeProperties=*/false);
}
} else
llvm_unreachable("invalid ObjCContainerDecl type.");
}
-/// ActOnForwardClassDeclaration -
Sema::DeclGroupPtrTy
Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
IdentifierInfo **IdentList,
@@ -2041,10 +2036,11 @@ Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
} else {
// a forward class declaration matching a typedef name of a class refers
// to the underlying class. Just ignore the forward class with a warning
- // as this will force the intended behavior which is to lookup the typedef
- // name.
+ // as this will force the intended behavior which is to lookup the
+ // typedef name.
if (isa<ObjCObjectType>(TDD->getUnderlyingType())) {
- Diag(AtClassLoc, diag::warn_forward_class_redefinition) << IdentList[i];
+ Diag(AtClassLoc, diag::warn_forward_class_redefinition)
+ << IdentList[i];
Diag(PrevDecl->getLocation(), diag::note_previous_definition);
continue;
}
@@ -2107,7 +2103,12 @@ static bool matchTypes(ASTContext &Context, Sema::MethodMatchStrategy strategy,
// validate the basic, low-level compatibility of the two types.
// As a minimum, require the sizes and alignments to match.
- if (Context.getTypeInfo(left) != Context.getTypeInfo(right))
+ TypeInfo LeftTI = Context.getTypeInfo(left);
+ TypeInfo RightTI = Context.getTypeInfo(right);
+ if (LeftTI.Width != RightTI.Width)
+ return false;
+
+ if (LeftTI.Align != RightTI.Align)
return false;
// Consider all the kinds of non-dependent canonical types:
@@ -2159,7 +2160,13 @@ static bool tryMatchRecordTypes(ASTContext &Context,
return false;
// Require size and alignment to match.
- if (Context.getTypeInfo(lt) != Context.getTypeInfo(rt)) return false;
+ TypeInfo LeftTI = Context.getTypeInfo(lt);
+ TypeInfo RightTI = Context.getTypeInfo(rt);
+ if (LeftTI.Width != RightTI.Width)
+ return false;
+
+ if (LeftTI.Align != RightTI.Align)
+ return false;
// Require fields to match.
RecordDecl::field_iterator li = left->field_begin(), le = left->field_end();
@@ -2210,21 +2217,22 @@ bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
return true;
}
-void Sema::addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method) {
+void Sema::addMethodToGlobalList(ObjCMethodList *List,
+ ObjCMethodDecl *Method) {
// Record at the head of the list whether there were 0, 1, or >= 2 methods
// inside categories.
- if (ObjCCategoryDecl *
- CD = dyn_cast<ObjCCategoryDecl>(Method->getDeclContext()))
+ if (ObjCCategoryDecl *CD =
+ dyn_cast<ObjCCategoryDecl>(Method->getDeclContext()))
if (!CD->IsClassExtension() && List->getBits() < 2)
- List->setBits(List->getBits()+1);
+ List->setBits(List->getBits() + 1);
// If the list is empty, make it a singleton list.
- if (List->Method == nullptr) {
- List->Method = Method;
+ if (List->getMethod() == nullptr) {
+ List->setMethod(Method);
List->setNext(nullptr);
return;
}
-
+
// We've seen a method with this name, see if we have already seen this type
// signature.
ObjCMethodList *Previous = List;
@@ -2233,35 +2241,42 @@ void Sema::addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method) {
if (getLangOpts().Modules && !getLangOpts().CurrentModule.empty())
continue;
- if (!MatchTwoMethodDeclarations(Method, List->Method))
+ if (!MatchTwoMethodDeclarations(Method, List->getMethod()))
continue;
-
- ObjCMethodDecl *PrevObjCMethod = List->Method;
+
+ ObjCMethodDecl *PrevObjCMethod = List->getMethod();
// Propagate the 'defined' bit.
if (Method->isDefined())
PrevObjCMethod->setDefined(true);
-
+ else {
+ // Objective-C doesn't allow an @interface for a class after its
+ // @implementation. So if Method is not defined and there already is
+ // an entry for this type signature, Method has to be for a different
+ // class than PrevObjCMethod.
+ List->setHasMoreThanOneDecl(true);
+ }
+
// If a method is deprecated, push it in the global pool.
// This is used for better diagnostics.
if (Method->isDeprecated()) {
if (!PrevObjCMethod->isDeprecated())
- List->Method = Method;
+ List->setMethod(Method);
}
- // If new method is unavailable, push it into global pool
+ // If the new method is unavailable, push it into global pool
// unless previous one is deprecated.
if (Method->isUnavailable()) {
if (PrevObjCMethod->getAvailability() < AR_Deprecated)
- List->Method = Method;
+ List->setMethod(Method);
}
-
+
return;
}
-
+
// We have a new signature for an existing method - add it.
// This is extremely rare. Only 1% of Cocoa selectors are "overloaded".
ObjCMethodList *Mem = BumpAlloc.Allocate<ObjCMethodList>();
- Previous->setNext(new (Mem) ObjCMethodList(Method, nullptr));
+ Previous->setNext(new (Mem) ObjCMethodList(Method));
}
/// \brief Read the contents of the method pool for a given selector from
@@ -2284,7 +2299,7 @@ void Sema::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
if (Pos == MethodPool.end())
Pos = MethodPool.insert(std::make_pair(Method->getSelector(),
GlobalMethods())).first;
-
+
Method->setDefined(impl);
ObjCMethodList &Entry = instance ? Pos->second.first : Pos->second.second;
@@ -2310,6 +2325,32 @@ static bool isAcceptableMethodMismatch(ObjCMethodDecl *chosen,
return (chosen->getReturnType()->isIntegerType());
}
+bool Sema::CollectMultipleMethodsInGlobalPool(
+ Selector Sel, SmallVectorImpl<ObjCMethodDecl *> &Methods, bool instance) {
+ if (ExternalSource)
+ ReadMethodPool(Sel);
+
+ GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
+ if (Pos == MethodPool.end())
+ return false;
+ // Gather the non-hidden methods.
+ ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second;
+ for (ObjCMethodList *M = &MethList; M; M = M->getNext())
+ if (M->getMethod() && !M->getMethod()->isHidden())
+ Methods.push_back(M->getMethod());
+ return Methods.size() > 1;
+}
+
+bool Sema::AreMultipleMethodsInGlobalPool(Selector Sel, bool instance) {
+ GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
+ // Test for no method in the pool which should not trigger any warning by
+ // caller.
+ if (Pos == MethodPool.end())
+ return true;
+ ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second;
+ return MethList.hasMoreThanOneDecl();
+}
+
ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool warn, bool instance) {
@@ -2324,12 +2365,12 @@ ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second;
SmallVector<ObjCMethodDecl *, 4> Methods;
for (ObjCMethodList *M = &MethList; M; M = M->getNext()) {
- if (M->Method && !M->Method->isHidden()) {
+ if (M->getMethod() && !M->getMethod()->isHidden()) {
// If we're not supposed to warn about mismatches, we're done.
if (!warn)
- return M->Method;
+ return M->getMethod();
- Methods.push_back(M->Method);
+ Methods.push_back(M->getMethod());
}
}
@@ -2401,13 +2442,13 @@ ObjCMethodDecl *Sema::LookupImplementedMethodInGlobalPool(Selector Sel) {
GlobalMethods &Methods = Pos->second;
for (const ObjCMethodList *Method = &Methods.first; Method;
Method = Method->getNext())
- if (Method->Method && Method->Method->isDefined())
- return Method->Method;
+ if (Method->getMethod() && Method->getMethod()->isDefined())
+ return Method->getMethod();
for (const ObjCMethodList *Method = &Methods.second; Method;
Method = Method->getNext())
- if (Method->Method && Method->Method->isDefined())
- return Method->Method;
+ if (Method->getMethod() && Method->getMethod()->isDefined())
+ return Method->getMethod();
return nullptr;
}
@@ -2470,25 +2511,27 @@ Sema::SelectorsForTypoCorrection(Selector Sel,
e = MethodPool.end(); b != e; b++) {
// instance methods
for (ObjCMethodList *M = &b->second.first; M; M=M->getNext())
- if (M->Method &&
- (M->Method->getSelector().getNumArgs() == NumArgs) &&
- (M->Method->getSelector() != Sel)) {
+ if (M->getMethod() &&
+ (M->getMethod()->getSelector().getNumArgs() == NumArgs) &&
+ (M->getMethod()->getSelector() != Sel)) {
if (ObjectIsId)
- Methods.push_back(M->Method);
+ Methods.push_back(M->getMethod());
else if (!ObjectIsClass &&
- HelperIsMethodInObjCType(*this, M->Method->getSelector(), ObjectType))
- Methods.push_back(M->Method);
+ HelperIsMethodInObjCType(*this, M->getMethod()->getSelector(),
+ ObjectType))
+ Methods.push_back(M->getMethod());
}
// class methods
for (ObjCMethodList *M = &b->second.second; M; M=M->getNext())
- if (M->Method &&
- (M->Method->getSelector().getNumArgs() == NumArgs) &&
- (M->Method->getSelector() != Sel)) {
+ if (M->getMethod() &&
+ (M->getMethod()->getSelector().getNumArgs() == NumArgs) &&
+ (M->getMethod()->getSelector() != Sel)) {
if (ObjectIsClass)
- Methods.push_back(M->Method);
+ Methods.push_back(M->getMethod());
else if (!ObjectIsId &&
- HelperIsMethodInObjCType(*this, M->Method->getSelector(), ObjectType))
- Methods.push_back(M->Method);
+ HelperIsMethodInObjCType(*this, M->getMethod()->getSelector(),
+ ObjectType))
+ Methods.push_back(M->getMethod());
}
}
@@ -2818,7 +2861,7 @@ public:
}
ObjCMethodList &list =
method->isInstanceMethod() ? it->second.first : it->second.second;
- if (!list.Method) return;
+ if (!list.getMethod()) return;
ObjCContainerDecl *container
= cast<ObjCContainerDecl>(method->getDeclContext());
@@ -3237,6 +3280,7 @@ Decl *Sema::ActOnMethodDeclaration(
case OMF_mutableCopy:
case OMF_release:
case OMF_retainCount:
+ case OMF_initialize:
case OMF_performSelector:
break;
diff --git a/lib/Sema/SemaExceptionSpec.cpp b/lib/Sema/SemaExceptionSpec.cpp
index b92fcbd2a0d9..2387325be435 100644
--- a/lib/Sema/SemaExceptionSpec.cpp
+++ b/lib/Sema/SemaExceptionSpec.cpp
@@ -35,6 +35,33 @@ static const FunctionProtoType *GetUnderlyingFunction(QualType T)
return T->getAs<FunctionProtoType>();
}
+/// HACK: libstdc++ has a bug where it shadows std::swap with a member
+/// swap function then tries to call std::swap unqualified from the exception
+/// specification of that function. This function detects whether we're in
+/// such a case and turns off delay-parsing of exception specifications.
+bool Sema::isLibstdcxxEagerExceptionSpecHack(const Declarator &D) {
+ auto *RD = dyn_cast<CXXRecordDecl>(CurContext);
+
+ // All the problem cases are member functions named "swap" within class
+ // templates declared directly within namespace std.
+ if (!RD || RD->getEnclosingNamespaceContext() != getStdNamespace() ||
+ !RD->getIdentifier() || !RD->getDescribedClassTemplate() ||
+ !D.getIdentifier() || !D.getIdentifier()->isStr("swap"))
+ return false;
+
+ // Only apply this hack within a system header.
+ if (!Context.getSourceManager().isInSystemHeader(D.getLocStart()))
+ return false;
+
+ return llvm::StringSwitch<bool>(RD->getIdentifier()->getName())
+ .Case("array", true)
+ .Case("pair", true)
+ .Case("priority_queue", true)
+ .Case("stack", true)
+ .Case("queue", true)
+ .Default(false);
+}
+
/// CheckSpecifiedExceptionType - Check if the given type is valid in an
/// exception specification. Incomplete types, or pointers to incomplete types
/// other than void are not allowed.
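The shape of the libstdc++ code the hack above exists for, heavily simplified (a stand-in namespace is used here; the real code lives in namespace std inside a system header, which is exactly what isLibstdcxxEagerExceptionSpecHack checks for):

    namespace std_like {                        // stand-in for namespace std
      template <typename T> void swap(T &, T &);

      template <typename T> struct pair {
        T first, second;
        // The unqualified 'swap' below is meant to find the free function
        // above, but once exception specifications are delay-parsed the
        // member 'swap' is already visible and shadows it. For the handful
        // of affected class names the hack keeps eager parsing instead.
        void swap(pair &p) noexcept(noexcept(swap(first, p.first)));
      };
    }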
@@ -112,6 +139,11 @@ bool Sema::CheckDistantExceptionSpec(QualType T) {
const FunctionProtoType *
Sema::ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT) {
+ if (FPT->getExceptionSpecType() == EST_Unparsed) {
+ Diag(Loc, diag::err_exception_spec_not_parsed);
+ return nullptr;
+ }
+
if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()))
return FPT;
@@ -132,21 +164,14 @@ Sema::ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT) {
return SourceDecl->getType()->castAs<FunctionProtoType>();
}
-void Sema::UpdateExceptionSpec(FunctionDecl *FD,
- const FunctionProtoType::ExtProtoInfo &EPI) {
- const FunctionProtoType *Proto = FD->getType()->castAs<FunctionProtoType>();
-
- // Overwrite the exception spec and rebuild the function type.
- FunctionProtoType::ExtProtoInfo NewEPI = Proto->getExtProtoInfo();
- NewEPI.ExceptionSpecType = EPI.ExceptionSpecType;
- NewEPI.NumExceptions = EPI.NumExceptions;
- NewEPI.Exceptions = EPI.Exceptions;
- NewEPI.NoexceptExpr = EPI.NoexceptExpr;
- FD->setType(Context.getFunctionType(Proto->getReturnType(),
- Proto->getParamTypes(), NewEPI));
+void
+Sema::UpdateExceptionSpec(FunctionDecl *FD,
+ const FunctionProtoType::ExceptionSpecInfo &ESI) {
+ for (auto *Redecl : FD->redecls())
+ Context.adjustExceptionSpec(cast<FunctionDecl>(Redecl), ESI);
// If we've fully resolved the exception specification, notify listeners.
- if (!isUnresolvedExceptionSpec(EPI.ExceptionSpecType))
+ if (!isUnresolvedExceptionSpec(ESI.Type))
if (auto *Listener = getASTMutationListener())
Listener->ResolvedExceptionSpec(FD);
}
@@ -227,32 +252,28 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
(Old->getLocation().isInvalid() ||
Context.getSourceManager().isInSystemHeader(Old->getLocation())) &&
Old->isExternC()) {
- FunctionProtoType::ExtProtoInfo EPI = NewProto->getExtProtoInfo();
- EPI.ExceptionSpecType = EST_DynamicNone;
- QualType NewType = Context.getFunctionType(NewProto->getReturnType(),
- NewProto->getParamTypes(), EPI);
- New->setType(NewType);
+ New->setType(Context.getFunctionType(
+ NewProto->getReturnType(), NewProto->getParamTypes(),
+ NewProto->getExtProtoInfo().withExceptionSpec(EST_DynamicNone)));
return false;
}
const FunctionProtoType *OldProto =
Old->getType()->castAs<FunctionProtoType>();
- FunctionProtoType::ExtProtoInfo EPI = NewProto->getExtProtoInfo();
- EPI.ExceptionSpecType = OldProto->getExceptionSpecType();
- if (EPI.ExceptionSpecType == EST_Dynamic) {
- EPI.NumExceptions = OldProto->getNumExceptions();
- EPI.Exceptions = OldProto->exception_begin();
- } else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) {
+ FunctionProtoType::ExceptionSpecInfo ESI = OldProto->getExceptionSpecType();
+ if (ESI.Type == EST_Dynamic) {
+ ESI.Exceptions = OldProto->exceptions();
+ } else if (ESI.Type == EST_ComputedNoexcept) {
// FIXME: We can't just take the expression from the old prototype. It
// likely contains references to the old prototype's parameters.
}
// Update the type of the function with the appropriate exception
// specification.
- QualType NewType = Context.getFunctionType(NewProto->getReturnType(),
- NewProto->getParamTypes(), EPI);
- New->setType(NewType);
+ New->setType(Context.getFunctionType(
+ NewProto->getReturnType(), NewProto->getParamTypes(),
+ NewProto->getExtProtoInfo().withExceptionSpec(ESI)));
// Warn about the lack of exception specification.
SmallString<128> ExceptionSpecString;
@@ -723,10 +744,11 @@ static bool CheckSpecForTypesEquivalent(Sema &S,
/// assignment and override compatibility check. We do not check the parameters
/// of parameter function pointers recursively, as no sane programmer would
/// even be able to write such a function type.
-bool Sema::CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
- const FunctionProtoType *Target, SourceLocation TargetLoc,
- const FunctionProtoType *Source, SourceLocation SourceLoc)
-{
+bool Sema::CheckParamExceptionSpec(const PartialDiagnostic &NoteID,
+ const FunctionProtoType *Target,
+ SourceLocation TargetLoc,
+ const FunctionProtoType *Source,
+ SourceLocation SourceLoc) {
if (CheckSpecForTypesEquivalent(
*this, PDiag(diag::err_deep_exception_specs_differ) << 0, PDiag(),
Target->getReturnType(), TargetLoc, Source->getReturnType(),
@@ -747,23 +769,30 @@ bool Sema::CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
return false;
}
-bool Sema::CheckExceptionSpecCompatibility(Expr *From, QualType ToType)
-{
+bool Sema::CheckExceptionSpecCompatibility(Expr *From, QualType ToType) {
// First we check for applicability.
// Target type must be a function, function pointer or function reference.
const FunctionProtoType *ToFunc = GetUnderlyingFunction(ToType);
- if (!ToFunc)
+ if (!ToFunc || ToFunc->hasDependentExceptionSpec())
return false;
// SourceType must be a function or function pointer.
const FunctionProtoType *FromFunc = GetUnderlyingFunction(From->getType());
- if (!FromFunc)
+ if (!FromFunc || FromFunc->hasDependentExceptionSpec())
return false;
// Now we've got the correct types on both sides, check their compatibility.
// This means that the source of the conversion can only throw a subset of
// the exceptions of the target, and any exception specs on arguments or
// return types must be equivalent.
+ //
+ // FIXME: If there is a nested dependent exception specification, we should
+ // not be checking it here. This is fine:
+ // template<typename T> void f() {
+ // void (*p)(void (*) throw(T));
+ // void (*q)(void (*) throw(int)) = p;
+ // }
+ // ... because it might be instantiated with T=int.
return CheckExceptionSpecSubset(PDiag(diag::err_incompatible_exception_specs),
PDiag(), ToFunc,
From->getSourceRange().getBegin(),
@@ -772,6 +801,11 @@ bool Sema::CheckExceptionSpecCompatibility(Expr *From, QualType ToType)
bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old) {
+ // If the new exception specification hasn't been parsed yet, skip the check.
+ // We'll get called again once it's been parsed.
+ if (New->getType()->castAs<FunctionProtoType>()->getExceptionSpecType() ==
+ EST_Unparsed)
+ return false;
if (getLangOpts().CPlusPlus11 && isa<CXXDestructorDecl>(New)) {
// Don't check uninstantiated template destructors at all. We can only
// synthesize correct specs after the template is instantiated.
@@ -780,11 +814,18 @@ bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
if (New->getParent()->isBeingDefined()) {
// The destructor might be updated once the definition is finished. So
// remember it and check later.
- DelayedDestructorExceptionSpecChecks.push_back(std::make_pair(
- cast<CXXDestructorDecl>(New), cast<CXXDestructorDecl>(Old)));
+ DelayedExceptionSpecChecks.push_back(std::make_pair(New, Old));
return false;
}
}
+ // If the old exception specification hasn't been parsed yet, remember that
+ // we need to perform this check when we get to the end of the outermost
+ // lexically-surrounding class.
+ if (Old->getType()->castAs<FunctionProtoType>()->getExceptionSpecType() ==
+ EST_Unparsed) {
+ DelayedExceptionSpecChecks.push_back(std::make_pair(New, Old));
+ return false;
+ }
unsigned DiagID = diag::err_override_exception_spec;
if (getLangOpts().MicrosoftExt)
DiagID = diag::ext_override_exception_spec;
@@ -1051,6 +1092,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
case Expr::CXXDependentScopeMemberExprClass:
case Expr::CXXUnresolvedConstructExprClass:
case Expr::DependentScopeDeclRefExprClass:
+ case Expr::CXXFoldExprClass:
return CT_Dependent;
case Expr::AsTypeExprClass:
@@ -1071,6 +1113,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
case Expr::UnaryExprOrTypeTraitExprClass:
case Expr::UnresolvedLookupExprClass:
case Expr::UnresolvedMemberExprClass:
+ case Expr::TypoExprClass:
// FIXME: Can any of the above throw? If so, when?
return CT_Cannot;
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
index 35dad82523c9..fba7a2d23ffd 100644
--- a/lib/Sema/SemaExpr.cpp
+++ b/lib/Sema/SemaExpr.cpp
@@ -42,6 +42,7 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaFixItUtils.h"
#include "clang/Sema/Template.h"
+#include "llvm/Support/ConvertUTF.h"
using namespace clang;
using namespace sema;
@@ -59,7 +60,7 @@ bool Sema::CanUseDecl(NamedDecl *D) {
// If the function has a deduced return type, and we can't deduce it,
// then we can't use it either.
- if (getLangOpts().CPlusPlus1y && FD->getReturnType()->isUndeducedType() &&
+ if (getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() &&
DeduceReturnType(FD, SourceLocation(), /*Diagnose*/ false))
return false;
}
@@ -302,7 +303,7 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
// If the function has a deduced return type, and we can't deduce it,
// then we can't use it either.
- if (getLangOpts().CPlusPlus1y && FD->getReturnType()->isUndeducedType() &&
+ if (getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() &&
DeduceReturnType(FD, Loc))
return true;
}
@@ -397,8 +398,8 @@ void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
if (sentinelExpr->isValueDependent()) return;
if (Context.isSentinelNullExpr(sentinelExpr)) return;
- // Pick a reasonable string to insert. Optimistically use 'nil' or
- // 'NULL' if those are actually defined in the context. Only use
+ // Pick a reasonable string to insert. Optimistically use 'nil', 'nullptr',
+ // or 'NULL' if those are actually defined in the context. Only use
// 'nil' for ObjC methods, where it's much more likely that the
// variadic arguments form a list of object pointers.
SourceLocation MissingNilLoc
@@ -407,6 +408,8 @@ void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
if (calleeType == CT_Method &&
PP.getIdentifierInfo("nil")->hasMacroDefinition())
NullValue = "nil";
+ else if (getLangOpts().CPlusPlus11)
+ NullValue = "nullptr";
else if (PP.getIdentifierInfo("NULL")->hasMacroDefinition())
NullValue = "NULL";
else
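A small illustration of the fix-it change above (hypothetical function; only the suggested terminator changes): in C++11 mode the missing-sentinel note now proposes 'nullptr' before falling back to 'NULL'.

    __attribute__((sentinel))
    void log_all(const char *first, ...) { (void)first; /* walk the va_list */ }

    int main() {
      log_all("a", "b", nullptr);   // OK: the call is null-terminated
      // log_all("a", "b");         // warning: missing sentinel in function call;
                                    // the fix-it now appends ", nullptr" in C++11
      return 0;
    }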
@@ -800,6 +803,9 @@ Sema::VarArgKind Sema::isValidVarArgType(const QualType &Ty) {
if (Ty->isObjCObjectType())
return VAK_Invalid;
+ if (getLangOpts().MSVCCompat)
+ return VAK_MSVCUndefined;
+
// FIXME: In C++11, these cases are conditionally-supported, meaning we're
// permitted to reject them. We should consider doing so.
return VAK_Undefined;
@@ -829,6 +835,7 @@ void Sema::checkVariadicArgument(const Expr *E, VariadicCallType CT) {
break;
case VAK_Undefined:
+ case VAK_MSVCUndefined:
DiagRuntimeBehavior(
E->getLocStart(), nullptr,
PDiag(diag::warn_cannot_pass_non_pod_arg_to_vararg)
@@ -933,68 +940,6 @@ static bool handleIntegerToComplexFloatConversion(Sema &S, ExprResult &IntExpr,
return false;
}
-/// \brief Takes two complex float types and converts them to the same type.
-/// Helper function of UsualArithmeticConversions()
-static QualType
-handleComplexFloatToComplexFloatConverstion(Sema &S, ExprResult &LHS,
- ExprResult &RHS, QualType LHSType,
- QualType RHSType,
- bool IsCompAssign) {
- int order = S.Context.getFloatingTypeOrder(LHSType, RHSType);
-
- if (order < 0) {
- // _Complex float -> _Complex double
- if (!IsCompAssign)
- LHS = S.ImpCastExprToType(LHS.get(), RHSType, CK_FloatingComplexCast);
- return RHSType;
- }
- if (order > 0)
- // _Complex float -> _Complex double
- RHS = S.ImpCastExprToType(RHS.get(), LHSType, CK_FloatingComplexCast);
- return LHSType;
-}
-
-/// \brief Converts otherExpr to complex float and promotes complexExpr if
-/// necessary. Helper function of UsualArithmeticConversions()
-static QualType handleOtherComplexFloatConversion(Sema &S,
- ExprResult &ComplexExpr,
- ExprResult &OtherExpr,
- QualType ComplexTy,
- QualType OtherTy,
- bool ConvertComplexExpr,
- bool ConvertOtherExpr) {
- int order = S.Context.getFloatingTypeOrder(ComplexTy, OtherTy);
-
- // If just the complexExpr is complex, the otherExpr needs to be converted,
- // and the complexExpr might need to be promoted.
- if (order > 0) { // complexExpr is wider
- // float -> _Complex double
- if (ConvertOtherExpr) {
- QualType fp = cast<ComplexType>(ComplexTy)->getElementType();
- OtherExpr = S.ImpCastExprToType(OtherExpr.get(), fp, CK_FloatingCast);
- OtherExpr = S.ImpCastExprToType(OtherExpr.get(), ComplexTy,
- CK_FloatingRealToComplex);
- }
- return ComplexTy;
- }
-
- // otherTy is at least as wide. Find its corresponding complex type.
- QualType result = (order == 0 ? ComplexTy :
- S.Context.getComplexType(OtherTy));
-
- // double -> _Complex double
- if (ConvertOtherExpr)
- OtherExpr = S.ImpCastExprToType(OtherExpr.get(), result,
- CK_FloatingRealToComplex);
-
- // _Complex float -> _Complex double
- if (ConvertComplexExpr && order < 0)
- ComplexExpr = S.ImpCastExprToType(ComplexExpr.get(), result,
- CK_FloatingComplexCast);
-
- return result;
-}
-
/// \brief Handle arithmetic conversion with complex types. Helper function of
/// UsualArithmeticConversions()
static QualType handleComplexFloatConversion(Sema &S, ExprResult &LHS,
@@ -1020,26 +965,35 @@ static QualType handleComplexFloatConversion(Sema &S, ExprResult &LHS,
// when combining a "long double" with a "double _Complex", the
// "double _Complex" is promoted to "long double _Complex".
- bool LHSComplexFloat = LHSType->isComplexType();
- bool RHSComplexFloat = RHSType->isComplexType();
-
- // If both are complex, just cast to the more precise type.
- if (LHSComplexFloat && RHSComplexFloat)
- return handleComplexFloatToComplexFloatConverstion(S, LHS, RHS,
- LHSType, RHSType,
- IsCompAssign);
-
- // If only one operand is complex, promote it if necessary and convert the
- // other operand to complex.
- if (LHSComplexFloat)
- return handleOtherComplexFloatConversion(
- S, LHS, RHS, LHSType, RHSType, /*convertComplexExpr*/!IsCompAssign,
- /*convertOtherExpr*/ true);
-
- assert(RHSComplexFloat);
- return handleOtherComplexFloatConversion(
- S, RHS, LHS, RHSType, LHSType, /*convertComplexExpr*/true,
- /*convertOtherExpr*/ !IsCompAssign);
+ // Compute the rank of the two types, regardless of whether they are complex.
+ int Order = S.Context.getFloatingTypeOrder(LHSType, RHSType);
+
+ auto *LHSComplexType = dyn_cast<ComplexType>(LHSType);
+ auto *RHSComplexType = dyn_cast<ComplexType>(RHSType);
+ QualType LHSElementType =
+ LHSComplexType ? LHSComplexType->getElementType() : LHSType;
+ QualType RHSElementType =
+ RHSComplexType ? RHSComplexType->getElementType() : RHSType;
+
+ QualType ResultType = S.Context.getComplexType(LHSElementType);
+ if (Order < 0) {
+ // Promote the precision of the LHS if not an assignment.
+ ResultType = S.Context.getComplexType(RHSElementType);
+ if (!IsCompAssign) {
+ if (LHSComplexType)
+ LHS =
+ S.ImpCastExprToType(LHS.get(), ResultType, CK_FloatingComplexCast);
+ else
+ LHS = S.ImpCastExprToType(LHS.get(), RHSElementType, CK_FloatingCast);
+ }
+ } else if (Order > 0) {
+ // Promote the precision of the RHS.
+ if (RHSComplexType)
+ RHS = S.ImpCastExprToType(RHS.get(), ResultType, CK_FloatingComplexCast);
+ else
+ RHS = S.ImpCastExprToType(RHS.get(), LHSElementType, CK_FloatingCast);
+ }
+ return ResultType;
}
/// \brief Handle arithmetic conversion from integer to float. Helper function
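The rewritten handleComplexFloatConversion above replaces the two deleted helpers with a single rank comparison: when a complex and a real floating type meet, the result is the complex type whose element type is the higher-ranked of the two. A quick sketch using Clang's _Complex extension in C++ (illustrative only):

    #include <type_traits>

    int main() {
      _Complex float cf = 2.0f;    // Clang extension in C++
      double         d  = 3.0;

      // float vs. double: double has the higher rank, so the common type is
      // _Complex double (cf is promoted; d already has the element type).
      auto r = cf + d;
      static_assert(std::is_same<decltype(r), _Complex double>::value, "");
      return (int)__real__ r - 5;  // 2 + 3, so this returns 0
    }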
@@ -1336,6 +1290,13 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
ControllingExpr = result.get();
}
+ // The controlling expression is an unevaluated operand, so side effects are
+ // likely unintended.
+ if (ActiveTemplateInstantiations.empty() &&
+ ControllingExpr->HasSideEffects(Context, false))
+ Diag(ControllingExpr->getExprLoc(),
+ diag::warn_side_effects_unevaluated_context);
+
bool TypeErrorFound = false,
IsResultDependent = ControllingExpr->isTypeDependent(),
ContainsUnexpandedParameterPack
@@ -1636,40 +1597,37 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
if (getLangOpts().CUDA)
if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
if (const FunctionDecl *Callee = dyn_cast<FunctionDecl>(D)) {
- CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller),
- CalleeTarget = IdentifyCUDATarget(Callee);
- if (CheckCUDATarget(CallerTarget, CalleeTarget)) {
+ if (CheckCUDATarget(Caller, Callee)) {
Diag(NameInfo.getLoc(), diag::err_ref_bad_target)
- << CalleeTarget << D->getIdentifier() << CallerTarget;
+ << IdentifyCUDATarget(Callee) << D->getIdentifier()
+ << IdentifyCUDATarget(Caller);
Diag(D->getLocation(), diag::note_previous_decl)
<< D->getIdentifier();
return ExprError();
}
}
- bool refersToEnclosingScope =
- (CurContext != D->getDeclContext() &&
- D->getDeclContext()->isFunctionOrMethod()) ||
- (isa<VarDecl>(D) &&
- cast<VarDecl>(D)->isInitCapture());
+ bool RefersToCapturedVariable =
+ isa<VarDecl>(D) &&
+ NeedToCaptureVariable(cast<VarDecl>(D), NameInfo.getLoc());
DeclRefExpr *E;
if (isa<VarTemplateSpecializationDecl>(D)) {
VarTemplateSpecializationDecl *VarSpec =
cast<VarTemplateSpecializationDecl>(D);
- E = DeclRefExpr::Create(
- Context,
- SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc(),
- VarSpec->getTemplateKeywordLoc(), D, refersToEnclosingScope,
- NameInfo.getLoc(), Ty, VK, FoundD, TemplateArgs);
+ E = DeclRefExpr::Create(Context, SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc(),
+ VarSpec->getTemplateKeywordLoc(), D,
+ RefersToCapturedVariable, NameInfo.getLoc(), Ty, VK,
+ FoundD, TemplateArgs);
} else {
assert(!TemplateArgs && "No template arguments for non-variable"
" template specialization references");
- E = DeclRefExpr::Create(
- Context,
- SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc(),
- SourceLocation(), D, refersToEnclosingScope, NameInfo, Ty, VK, FoundD);
+ E = DeclRefExpr::Create(Context, SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc(),
+ SourceLocation(), D, RefersToCapturedVariable,
+ NameInfo, Ty, VK, FoundD);
}
MarkDeclRefReferenced(E);
@@ -1719,13 +1677,48 @@ Sema::DecomposeUnqualifiedId(const UnqualifiedId &Id,
}
}
+static void emitEmptyLookupTypoDiagnostic(
+ const TypoCorrection &TC, Sema &SemaRef, const CXXScopeSpec &SS,
+ DeclarationName Typo, SourceLocation TypoLoc, ArrayRef<Expr *> Args,
+ unsigned DiagnosticID, unsigned DiagnosticSuggestID) {
+ DeclContext *Ctx =
+ SS.isEmpty() ? nullptr : SemaRef.computeDeclContext(SS, false);
+ if (!TC) {
+ // Emit a special diagnostic for failed member lookups.
+ // FIXME: computing the declaration context might fail here (?)
+ if (Ctx)
+ SemaRef.Diag(TypoLoc, diag::err_no_member) << Typo << Ctx
+ << SS.getRange();
+ else
+ SemaRef.Diag(TypoLoc, DiagnosticID) << Typo;
+ return;
+ }
+
+ std::string CorrectedStr = TC.getAsString(SemaRef.getLangOpts());
+ bool DroppedSpecifier =
+ TC.WillReplaceSpecifier() && Typo.getAsString() == CorrectedStr;
+ unsigned NoteID =
+ (TC.getCorrectionDecl() && isa<ImplicitParamDecl>(TC.getCorrectionDecl()))
+ ? diag::note_implicit_param_decl
+ : diag::note_previous_decl;
+ if (!Ctx)
+ SemaRef.diagnoseTypo(TC, SemaRef.PDiag(DiagnosticSuggestID) << Typo,
+ SemaRef.PDiag(NoteID));
+ else
+ SemaRef.diagnoseTypo(TC, SemaRef.PDiag(diag::err_no_member_suggest)
+ << Typo << Ctx << DroppedSpecifier
+ << SS.getRange(),
+ SemaRef.PDiag(NoteID));
+}
+
/// Diagnose an empty lookup.
///
/// \return false if new lookup candidates were found
-bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
- CorrectionCandidateCallback &CCC,
- TemplateArgumentListInfo *ExplicitTemplateArgs,
- ArrayRef<Expr *> Args) {
+bool
+Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
+ std::unique_ptr<CorrectionCandidateCallback> CCC,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ ArrayRef<Expr *> Args, TypoExpr **Out) {
DeclarationName Name = R.getLookupName();
unsigned diagnostic = diag::err_undeclared_var_use;
@@ -1842,8 +1835,22 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
// We didn't find anything, so try to correct for a typo.
TypoCorrection Corrected;
- if (S && (Corrected = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(),
- S, &SS, CCC, CTK_ErrorRecovery))) {
+ if (S && Out) {
+ SourceLocation TypoLoc = R.getNameLoc();
+ assert(!ExplicitTemplateArgs &&
+ "Diagnosing an empty lookup with explicit template args!");
+ *Out = CorrectTypoDelayed(
+ R.getLookupNameInfo(), R.getLookupKind(), S, &SS, std::move(CCC),
+ [=](const TypoCorrection &TC) {
+ emitEmptyLookupTypoDiagnostic(TC, *this, SS, Name, TypoLoc, Args,
+ diagnostic, diagnostic_suggest);
+ },
+ nullptr, CTK_ErrorRecovery);
+ if (*Out)
+ return true;
+ } else if (S && (Corrected =
+ CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S,
+ &SS, std::move(CCC), CTK_ErrorRecovery))) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier =
Corrected.WillReplaceSpecifier() && Name.getAsString() == CorrectedStr;
@@ -1990,14 +1997,12 @@ recoverFromMSUnqualifiedLookup(Sema &S, ASTContext &Context,
TemplateArgs);
}
-ExprResult Sema::ActOnIdExpression(Scope *S,
- CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- UnqualifiedId &Id,
- bool HasTrailingLParen,
- bool IsAddressOfOperand,
- CorrectionCandidateCallback *CCC,
- bool IsInlineAsmIdentifier) {
+ExprResult
+Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc, UnqualifiedId &Id,
+ bool HasTrailingLParen, bool IsAddressOfOperand,
+ std::unique_ptr<CorrectionCandidateCallback> CCC,
+ bool IsInlineAsmIdentifier, Token *KeywordReplacement) {
assert(!(IsAddressOfOperand && HasTrailingLParen) &&
"cannot be direct & operand and have a trailing lparen");
if (SS.isInvalid())
@@ -2109,12 +2114,43 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
// If this name wasn't predeclared and if this is not a function
// call, diagnose the problem.
- CorrectionCandidateCallback DefaultValidator;
- DefaultValidator.IsAddressOfOperand = IsAddressOfOperand;
+ TypoExpr *TE = nullptr;
+ auto DefaultValidator = llvm::make_unique<CorrectionCandidateCallback>(
+ II, SS.isValid() ? SS.getScopeRep() : nullptr);
+ DefaultValidator->IsAddressOfOperand = IsAddressOfOperand;
assert((!CCC || CCC->IsAddressOfOperand == IsAddressOfOperand) &&
"Typo correction callback misconfigured");
- if (DiagnoseEmptyLookup(S, SS, R, CCC ? *CCC : DefaultValidator))
- return ExprError();
+ if (CCC) {
+ // Make sure the callback knows what the typo being diagnosed is.
+ CCC->setTypoName(II);
+ if (SS.isValid())
+ CCC->setTypoNNS(SS.getScopeRep());
+ }
+ if (DiagnoseEmptyLookup(S, SS, R,
+ CCC ? std::move(CCC) : std::move(DefaultValidator),
+ nullptr, None, &TE)) {
+ if (TE && KeywordReplacement) {
+ auto &State = getTypoExprState(TE);
+ auto BestTC = State.Consumer->getNextCorrection();
+ if (BestTC.isKeyword()) {
+ auto *II = BestTC.getCorrectionAsIdentifierInfo();
+ if (State.DiagHandler)
+ State.DiagHandler(BestTC);
+ KeywordReplacement->startToken();
+ KeywordReplacement->setKind(II->getTokenID());
+ KeywordReplacement->setIdentifierInfo(II);
+ KeywordReplacement->setLocation(BestTC.getCorrectionRange().getBegin());
+ // Clean up the state associated with the TypoExpr, since it has
+ // now been diagnosed (without a call to CorrectDelayedTyposInExpr).
+ clearDelayedTypo(TE);
+ // Signal that a correction to a keyword was performed by returning a
+ // valid-but-null ExprResult.
+ return (Expr*)nullptr;
+ }
+ State.Consumer->resetCorrectionStream();
+ }
+ return TE ? TE : ExprError();
+ }
assert(!R.empty() &&
"DiagnoseEmptyLookup returned false but added no results");
@@ -2364,10 +2400,9 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
!IvarBacksCurrentMethodAccessor(IFace, CurMethod, IV))
Diag(Loc, diag::warn_direct_ivar_access) << IV->getDeclName();
- ObjCIvarRefExpr *Result = new (Context) ObjCIvarRefExpr(IV, IV->getType(),
- Loc, IV->getLocation(),
- SelfExpr.get(),
- true, true);
+ ObjCIvarRefExpr *Result = new (Context)
+ ObjCIvarRefExpr(IV, IV->getType(), Loc, IV->getLocation(),
+ SelfExpr.get(), true, true);
if (getLangOpts().ObjCAutoRefCount) {
if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
@@ -2660,15 +2695,15 @@ static bool CheckDeclInExpr(Sema &S, SourceLocation Loc, NamedDecl *D) {
return false;
}
-ExprResult
-Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
- LookupResult &R,
- bool NeedsADL) {
+ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
+ LookupResult &R, bool NeedsADL,
+ bool AcceptInvalidDecl) {
// If this is a single, fully-resolved result and we don't need ADL,
// just build an ordinary singleton decl ref.
if (!NeedsADL && R.isSingleResult() && !R.getAsSingle<FunctionTemplateDecl>())
return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), R.getFoundDecl(),
- R.getRepresentativeDecl());
+ R.getRepresentativeDecl(), nullptr,
+ AcceptInvalidDecl);
// We only need to check the declaration if there's exactly one
// result, because in the overloaded case the results can only be
@@ -2695,7 +2730,8 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
/// \brief Complete semantic analysis for a reference to the given declaration.
ExprResult Sema::BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
- NamedDecl *FoundD, const TemplateArgumentListInfo *TemplateArgs) {
+ NamedDecl *FoundD, const TemplateArgumentListInfo *TemplateArgs,
+ bool AcceptInvalidDecl) {
assert(D && "Cannot refer to a NULL declaration");
assert(!isa<FunctionTemplateDecl>(D) &&
"Cannot refer unambiguously to a function template");
@@ -2730,7 +2766,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
return ExprError();
// Only create DeclRefExpr's for valid Decl's.
- if (VD->isInvalidDecl())
+ if (VD->isInvalidDecl() && !AcceptInvalidDecl)
return ExprError();
// Handle members of anonymous structs and unions. If we got here,
@@ -2902,6 +2938,17 @@ ExprResult Sema::BuildDeclarationNameExpr(
}
}
+static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
+ SmallString<32> &Target) {
+ Target.resize(CharByteWidth * (Source.size() + 1));
+ char *ResultPtr = &Target[0];
+ const UTF8 *ErrorPtr;
+ bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
+ (void)success;
+ assert(success);
+ Target.resize(ResultPtr - &Target[0]);
+}
+
ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT) {
// Pick the current block, lambda, captured statement or function.
@@ -2921,22 +2968,35 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
}
QualType ResTy;
+ StringLiteral *SL = nullptr;
if (cast<DeclContext>(currentDecl)->isDependentContext())
ResTy = Context.DependentTy;
else {
// Pre-defined identifiers are of type char[x], where x is the length of
// the string.
- unsigned Length = PredefinedExpr::ComputeName(IT, currentDecl).length();
+ auto Str = PredefinedExpr::ComputeName(IT, currentDecl);
+ unsigned Length = Str.length();
llvm::APInt LengthI(32, Length + 1);
- if (IT == PredefinedExpr::LFunction)
+ if (IT == PredefinedExpr::LFunction) {
ResTy = Context.WideCharTy.withConst();
- else
+ SmallString<32> RawChars;
+ ConvertUTF8ToWideString(Context.getTypeSizeInChars(ResTy).getQuantity(),
+ Str, RawChars);
+ ResTy = Context.getConstantArrayType(ResTy, LengthI, ArrayType::Normal,
+ /*IndexTypeQuals*/ 0);
+ SL = StringLiteral::Create(Context, RawChars, StringLiteral::Wide,
+ /*Pascal*/ false, ResTy, Loc);
+ } else {
ResTy = Context.CharTy.withConst();
- ResTy = Context.getConstantArrayType(ResTy, LengthI, ArrayType::Normal, 0);
+ ResTy = Context.getConstantArrayType(ResTy, LengthI, ArrayType::Normal,
+ /*IndexTypeQuals*/ 0);
+ SL = StringLiteral::Create(Context, Str, StringLiteral::Ascii,
+ /*Pascal*/ false, ResTy, Loc);
+ }
}
- return new (Context) PredefinedExpr(Loc, ResTy, IT);
+ return new (Context) PredefinedExpr(Loc, ResTy, IT, SL);
}
ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
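The BuildPredefinedExpr change above attaches a real StringLiteral to the predefined expression, using the new ConvertUTF8ToWideString helper for the wide Microsoft L__FUNCTION__ form. A trivial illustration (hypothetical function name):

    #include <cstdio>

    void frobnicate() {
      // __func__ and __FUNCTION__ are now backed by an actual StringLiteral,
      // so they behave like an ordinary const char array holding the name.
      std::printf("%s\n", __FUNCTION__);   // prints "frobnicate"
    }

    int main() { frobnicate(); }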
@@ -3046,6 +3106,34 @@ static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal,
return FloatingLiteral::Create(S.Context, Val, isExact, Ty, Loc);
}
+bool Sema::CheckLoopHintExpr(Expr *E, SourceLocation Loc) {
+ assert(E && "Invalid expression");
+
+ if (E->isValueDependent())
+ return false;
+
+ QualType QT = E->getType();
+ if (!QT->isIntegerType() || QT->isBooleanType() || QT->isCharType()) {
+ Diag(E->getExprLoc(), diag::err_pragma_loop_invalid_argument_type) << QT;
+ return true;
+ }
+
+ llvm::APSInt ValueAPS;
+ ExprResult R = VerifyIntegerConstantExpression(E, &ValueAPS);
+
+ if (R.isInvalid())
+ return true;
+
+ bool ValueIsPositive = ValueAPS.isStrictlyPositive();
+ if (!ValueIsPositive || ValueAPS.getActiveBits() > 31) {
+ Diag(E->getExprLoc(), diag::err_pragma_loop_invalid_argument_value)
+ << ValueAPS.toString(10) << ValueIsPositive;
+ return true;
+ }
+
+ return false;
+}
+
ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
// Fast path for a single digit (which is quite common). A single digit
// cannot have a trigraph, escaped newline, radix prefix, or suffix.
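CheckLoopHintExpr above is the semantic check behind the arguments of #pragma clang loop: the value must be an integer constant expression (not bool or a character type) that is strictly positive and fits in 31 bits. For example (hypothetical function, standard Clang pragma):

    void scale(float *a, int n) {
      // 4 and 2 satisfy CheckLoopHintExpr; 0, a negative value, or a boolean
      // constant would be rejected with the new pragma-loop diagnostics.
      #pragma clang loop vectorize_width(4) interleave_count(2)
      for (int i = 0; i < n; ++i)
        a[i] *= 2.0f;
    }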
@@ -3117,7 +3205,8 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
} else {
llvm::APInt ResultVal(Context.getTargetInfo().getLongLongWidth(), 0);
if (Literal.GetIntegerValue(ResultVal))
- Diag(Tok.getLocation(), diag::err_integer_too_large);
+ Diag(Tok.getLocation(), diag::err_integer_literal_too_large)
+ << /* Unsigned */ 1;
Lit = IntegerLiteral::Create(Context, ResultVal, CookedTy,
Tok.getLocation());
}
@@ -3210,7 +3299,8 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
if (Literal.GetIntegerValue(ResultVal)) {
// If this value didn't fit into uintmax_t, error and force to ull.
- Diag(Tok.getLocation(), diag::err_integer_too_large);
+ Diag(Tok.getLocation(), diag::err_integer_literal_too_large)
+ << /* Unsigned */ 1;
Ty = Context.UnsignedLongLongTy;
assert(Context.getTypeSize(Ty) == ResultVal.getBitWidth() &&
"long long is not intmax_t?");
@@ -3290,7 +3380,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
// If we still couldn't decide a type, we probably have something that
// does not fit in a signed long long, but has no U suffix.
if (Ty.isNull()) {
- Diag(Tok.getLocation(), diag::ext_integer_too_large_for_signed);
+ Diag(Tok.getLocation(), diag::ext_integer_literal_too_large_for_signed);
Ty = Context.UnsignedLongLongTy;
Width = Context.getTargetInfo().getLongLongWidth();
}
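The renamed diagnostics above cover literals that do not fit in a signed type. A small example (value chosen for illustration):

    int main() {
      // 2^63 does not fit in 'long long'; with no suffix Clang warns that the
      // literal is too large for a signed integer type and gives it type
      // 'unsigned long long' (ext_integer_literal_too_large_for_signed).
      unsigned long long big = 9223372036854775808;
      return big != 0 ? 0 : 1;
    }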
@@ -3442,6 +3532,12 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
return true;
}
+ // The operand for sizeof and alignof is in an unevaluated expression context,
+ // so side effects could result in unintended consequences.
+ if ((ExprKind == UETT_SizeOf || ExprKind == UETT_AlignOf) &&
+ ActiveTemplateInstantiations.empty() && E->HasSideEffects(Context, false))
+ Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
+
if (CheckObjCTraitOperandConstraints(*this, ExprTy, E->getExprLoc(),
E->getSourceRange(), ExprKind))
return true;
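The new check above (together with the matching one added to CreateGenericSelectionExpr earlier in this file) warns when the operand of sizeof or alignof has side effects, since an unevaluated operand never executes them. For instance:

    int main() {
      int n = 0;
      // warning: expression with side effects has no effect in an unevaluated
      // context -- the increment inside sizeof never runs, so n stays 0.
      unsigned long s = sizeof(n++);
      return (n == 0 && s == sizeof(int)) ? 0 : 1;
    }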
@@ -4083,11 +4179,12 @@ static TypoCorrection TryTypoCorrectionForCall(Sema &S, Expr *Fn,
MemberExpr *ME = dyn_cast<MemberExpr>(Fn);
DeclarationName FuncName = FDecl->getDeclName();
SourceLocation NameLoc = ME ? ME->getMemberLoc() : Fn->getLocStart();
- FunctionCallCCC CCC(S, FuncName.getAsIdentifierInfo(), Args.size(), ME);
if (TypoCorrection Corrected = S.CorrectTypo(
DeclarationNameInfo(FuncName, NameLoc), Sema::LookupOrdinaryName,
- S.getScopeForContext(S.CurContext), nullptr, CCC,
+ S.getScopeForContext(S.CurContext), nullptr,
+ llvm::make_unique<FunctionCallCCC>(S, FuncName.getAsIdentifierInfo(),
+ Args.size(), ME),
Sema::CTK_ErrorRecovery)) {
if (NamedDecl *ND = Corrected.getCorrectionDecl()) {
if (Corrected.isOverloaded()) {
@@ -4454,6 +4551,8 @@ static bool checkArgsForPlaceholders(Sema &S, MultiExprArg args) {
ExprResult result = S.CheckPlaceholderExpr(args[i]);
if (result.isInvalid()) hasInvalid = true;
else args[i] = result.get();
+ } else if (hasInvalid) {
+ (void)S.CorrectDelayedTyposInExpr(args[i]);
}
}
return hasInvalid;
@@ -4587,23 +4686,6 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
ExecConfig, IsExecConfig);
}
-ExprResult
-Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
- MultiExprArg ExecConfig, SourceLocation GGGLoc) {
- FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
- if (!ConfigDecl)
- return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
- << "cudaConfigureCall");
- QualType ConfigQTy = ConfigDecl->getType();
-
- DeclRefExpr *ConfigDR = new (Context) DeclRefExpr(
- ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
- MarkFunctionReferenced(LLLLoc, ConfigDecl);
-
- return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
- /*IsExecConfig=*/true);
-}
-
/// ActOnAsTypeExpr - create a new asType (bitcast) from the arguments.
///
/// __builtin_astype( value, dst type )
@@ -4680,8 +4762,12 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
VK_RValue, RParenLoc);
// Bail out early if calling a builtin with custom typechecking.
- if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID))
- return CheckBuiltinFunctionCall(BuiltinID, TheCall);
+ if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID)) {
+ ExprResult Res = CorrectDelayedTyposInExpr(TheCall);
+ if (!Res.isUsable() || !isa<CallExpr>(Res.get()))
+ return Res;
+ return CheckBuiltinFunctionCall(FDecl, BuiltinID, cast<CallExpr>(Res.get()));
+ }
retry:
const FunctionType *FuncT;
@@ -4809,7 +4895,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
return ExprError();
if (BuiltinID)
- return CheckBuiltinFunctionCall(BuiltinID, TheCall);
+ return CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall);
} else if (NDecl) {
if (CheckPointerCall(NDecl, TheCall, Proto))
return ExprError();
@@ -5227,6 +5313,12 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
if (getLangOpts().CPlusPlus) {
// Check that there are no default arguments (C++ only).
CheckExtraCXXDefaultArguments(D);
+ } else {
+ // Make sure any TypoExprs have been dealt with.
+ ExprResult Res = CorrectDelayedTyposInExpr(CastExpr);
+ if (!Res.isUsable())
+ return ExprError();
+ CastExpr = Res.get();
}
checkUnusedDeclAttributes(D);
@@ -5693,6 +5785,15 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
ExprObjectKind &OK,
SourceLocation QuestionLoc) {
+ if (!getLangOpts().CPlusPlus) {
+ // C cannot handle TypoExpr nodes on either side of a binop because it
+ // doesn't handle dependent types properly, so make sure any TypoExprs have
+ // been dealt with before checking the operands.
+ ExprResult CondResult = CorrectDelayedTyposInExpr(Cond);
+ if (!CondResult.isUsable()) return QualType();
+ Cond = CondResult;
+ }
+
ExprResult LHSResult = CheckPlaceholderExpr(LHS.get());
if (!LHSResult.isUsable()) return QualType();
LHS = LHSResult;
@@ -5720,7 +5821,7 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
RHS.get()->getType()->isVectorType())
return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false);
- UsualArithmeticConversions(LHS, RHS);
+ QualType ResTy = UsualArithmeticConversions(LHS, RHS);
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
@@ -5737,8 +5838,12 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// If both operands have arithmetic type, do the usual arithmetic conversions
// to find a common type: C99 6.5.15p3,5.
- if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType())
- return LHS.get()->getType();
+ if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType()) {
+ LHS = ImpCastExprToType(LHS.get(), ResTy, PrepareScalarCast(LHS, ResTy));
+ RHS = ImpCastExprToType(RHS.get(), ResTy, PrepareScalarCast(RHS, ResTy));
+
+ return ResTy;
+ }
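Sketched at the source level, the change above makes the C conditional operator materialize the usual-arithmetic-conversion result on both arms instead of reusing the left-hand type (the C++ path receives the analogous change later in this patch):

    int choose(int c) {
      int    i = 1;
      double d = 2.5;
      // (c ? i : d) has type double; the AST now carries an explicit
      // ImplicitCastExpr from int to double around 'i' rather than an
      // unconverted int sub-expression under a double-typed parent.
      return (int)(c ? i : d);
    }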
// If both operands are the same structure or union type, the result is that
// type.
@@ -6161,7 +6266,7 @@ checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
if (!lhq.compatiblyIncludes(rhq)) {
// Treat address-space mismatches as fatal. TODO: address subspaces
- if (lhq.getAddressSpace() != rhq.getAddressSpace())
+ if (!lhq.isAddressSpaceSupersetOf(rhq))
ConvTy = Sema::IncompatiblePointerDiscardsQualifiers;
// It's okay to add or remove GC or lifetime qualifiers when converting to
@@ -6446,7 +6551,9 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
if (const PointerType *LHSPointer = dyn_cast<PointerType>(LHSType)) {
// U* -> T*
if (isa<PointerType>(RHSType)) {
- Kind = CK_BitCast;
+ unsigned AddrSpaceL = LHSPointer->getPointeeType().getAddressSpace();
+ unsigned AddrSpaceR = RHSType->getPointeeType().getAddressSpace();
+ Kind = AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast;
return checkPointerTypesForAssignment(*this, LHSType, RHSType);
}
@@ -6754,6 +6861,15 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
return Incompatible;
}
+ Expr *PRE = RHS.get()->IgnoreParenCasts();
+ if (ObjCProtocolExpr *OPE = dyn_cast<ObjCProtocolExpr>(PRE)) {
+ ObjCProtocolDecl *PDecl = OPE->getProtocol();
+ if (PDecl && !PDecl->hasDefinition()) {
+ Diag(PRE->getExprLoc(), diag::warn_atprotocol_protocol) << PDecl->getName();
+ Diag(PDecl->getLocation(), diag::note_entity_declared_at) << PDecl;
+ }
+ }
+
CastKind Kind = CK_Invalid;
Sema::AssignConvertType result =
CheckAssignmentConstraints(LHSType, RHS, Kind);
@@ -7120,6 +7236,19 @@ static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc,
if (isLHSPointer) LHSPointeeTy = LHSExpr->getType()->getPointeeType();
if (isRHSPointer) RHSPointeeTy = RHSExpr->getType()->getPointeeType();
+  // If both operands are pointers, check whether the operation is valid with
+  // respect to address spaces.
+ if (isLHSPointer && isRHSPointer) {
+ const PointerType *lhsPtr = LHSExpr->getType()->getAs<PointerType>();
+ const PointerType *rhsPtr = RHSExpr->getType()->getAs<PointerType>();
+ if (!lhsPtr->isAddressSpaceOverlapping(*rhsPtr)) {
+ S.Diag(Loc,
+ diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
+ << LHSExpr->getType() << RHSExpr->getType() << 1 /*arithmetic op*/
+ << LHSExpr->getSourceRange() << RHSExpr->getSourceRange();
+ return false;
+ }
+ }
+
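A sketch of what the new check rejects, assuming Clang's address_space attribute extension (OpenCL's __global/__local qualifiers reach the same path):

    typedef int __attribute__((address_space(1))) *PtrAS1;
    typedef int __attribute__((address_space(2))) *PtrAS2;

    long distance(PtrAS1 a, PtrAS2 b) {
      return a - b;   // error: arithmetic between pointers to non-overlapping
                      // address spaces is now diagnosed (this snippet does not compile)
    }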
// Check for arithmetic on pointers to incomplete types.
bool isLHSVoidPtr = isLHSPointer && LHSPointeeTy->isVoidType();
bool isRHSVoidPtr = isRHSPointer && RHSPointeeTy->isVoidType();
@@ -7163,7 +7292,7 @@ static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc,
bool IsStringPlusInt = StrExpr &&
IndexExpr->getType()->isIntegralOrUnscopedEnumerationType();
- if (!IsStringPlusInt)
+ if (!IsStringPlusInt || IndexExpr->isValueDependent())
return;
llvm::APSInt index;
@@ -7193,13 +7322,13 @@ static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc,
/// \brief Emit a warning when adding a char literal to a string.
static void diagnoseStringPlusChar(Sema &Self, SourceLocation OpLoc,
Expr *LHSExpr, Expr *RHSExpr) {
- const DeclRefExpr *StringRefExpr =
- dyn_cast<DeclRefExpr>(LHSExpr->IgnoreImpCasts());
+ const Expr *StringRefExpr = LHSExpr;
const CharacterLiteral *CharExpr =
dyn_cast<CharacterLiteral>(RHSExpr->IgnoreImpCasts());
- if (!StringRefExpr) {
- StringRefExpr = dyn_cast<DeclRefExpr>(RHSExpr->IgnoreImpCasts());
+
+ if (!CharExpr) {
CharExpr = dyn_cast<CharacterLiteral>(LHSExpr->IgnoreImpCasts());
+ StringRefExpr = RHSExpr;
}
if (!CharExpr || !StringRefExpr)
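Since the string side no longer has to be a DeclRefExpr, expressions such as member accesses now trigger the warning as well (wording paraphrased):

    struct Buf { const char *data; };
    const char *advance(struct Buf b) {
      return b.data + 'x';   // now warned: adding a char offsets the pointer,
                             // it does not append to the string
    }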
@@ -7417,7 +7546,7 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
}
static bool isScopedEnumerationType(QualType T) {
- if (const EnumType *ET = dyn_cast<EnumType>(T))
+ if (const EnumType *ET = T->getAs<EnumType>())
return ET->getDecl()->isScoped();
return false;
}
@@ -8048,6 +8177,13 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS, /*isError*/false);
}
if (LCanPointeeTy != RCanPointeeTy) {
+ const PointerType *lhsPtr = LHSType->getAs<PointerType>();
+ if (!lhsPtr->isAddressSpaceOverlapping(*RHSType->getAs<PointerType>())) {
+ Diag(Loc,
+ diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
+ << LHSType << RHSType << 0 /* comparison */
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ }
unsigned AddrSpaceL = LCanPointeeTy.getAddressSpace();
unsigned AddrSpaceR = RCanPointeeTy.getAddressSpace();
CastKind Kind = AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion
@@ -8462,7 +8598,7 @@ static NonConstCaptureKind isReferenceToNonConstCapture(Sema &S, Expr *E) {
// Must be a reference to a declaration from an enclosing scope.
DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
if (!DRE) return NCCK_None;
- if (!DRE->refersToEnclosingLocal()) return NCCK_None;
+ if (!DRE->refersToEnclosingVariableOrCapture()) return NCCK_None;
// The declaration must be a variable which is not declared 'const'.
VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
@@ -8494,19 +8630,19 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
if (IsLV == Expr::MLV_Valid)
return false;
- unsigned Diag = 0;
+ unsigned DiagID = 0;
bool NeedType = false;
switch (IsLV) { // C99 6.5.16p2
case Expr::MLV_ConstQualified:
- Diag = diag::err_typecheck_assign_const;
+ DiagID = diag::err_typecheck_assign_const;
// Use a specialized diagnostic when we're assigning to an object
// from an enclosing function or block.
if (NonConstCaptureKind NCCK = isReferenceToNonConstCapture(S, E)) {
if (NCCK == NCCK_Block)
- Diag = diag::err_block_decl_ref_not_modifiable_lvalue;
+ DiagID = diag::err_block_decl_ref_not_modifiable_lvalue;
else
- Diag = diag::err_lambda_decl_ref_not_modifiable_lvalue;
+ DiagID = diag::err_lambda_decl_ref_not_modifiable_lvalue;
break;
}
@@ -8526,18 +8662,18 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
// - self
ObjCMethodDecl *method = S.getCurMethodDecl();
if (method && var == method->getSelfDecl())
- Diag = method->isClassMethod()
+ DiagID = method->isClassMethod()
? diag::err_typecheck_arc_assign_self_class_method
: diag::err_typecheck_arc_assign_self;
// - fast enumeration variables
else
- Diag = diag::err_typecheck_arr_assign_enumeration;
+ DiagID = diag::err_typecheck_arr_assign_enumeration;
SourceRange Assign;
if (Loc != OrigLoc)
Assign = SourceRange(OrigLoc, OrigLoc);
- S.Diag(Loc, Diag) << E->getSourceRange() << Assign;
+ S.Diag(Loc, DiagID) << E->getSourceRange() << Assign;
// We need to preserve the AST regardless, so migration tool
// can do its job.
return false;
@@ -8548,37 +8684,37 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
break;
case Expr::MLV_ArrayType:
case Expr::MLV_ArrayTemporary:
- Diag = diag::err_typecheck_array_not_modifiable_lvalue;
+ DiagID = diag::err_typecheck_array_not_modifiable_lvalue;
NeedType = true;
break;
case Expr::MLV_NotObjectType:
- Diag = diag::err_typecheck_non_object_not_modifiable_lvalue;
+ DiagID = diag::err_typecheck_non_object_not_modifiable_lvalue;
NeedType = true;
break;
case Expr::MLV_LValueCast:
- Diag = diag::err_typecheck_lvalue_casts_not_supported;
+ DiagID = diag::err_typecheck_lvalue_casts_not_supported;
break;
case Expr::MLV_Valid:
llvm_unreachable("did not take early return for MLV_Valid");
case Expr::MLV_InvalidExpression:
case Expr::MLV_MemberFunction:
case Expr::MLV_ClassTemporary:
- Diag = diag::err_typecheck_expression_not_modifiable_lvalue;
+ DiagID = diag::err_typecheck_expression_not_modifiable_lvalue;
break;
case Expr::MLV_IncompleteType:
case Expr::MLV_IncompleteVoidType:
return S.RequireCompleteType(Loc, E->getType(),
diag::err_typecheck_incomplete_type_not_modifiable_lvalue, E);
case Expr::MLV_DuplicateVectorComponents:
- Diag = diag::err_typecheck_duplicate_vector_components_not_mlvalue;
+ DiagID = diag::err_typecheck_duplicate_vector_components_not_mlvalue;
break;
case Expr::MLV_NoSetterProperty:
llvm_unreachable("readonly properties should be processed differently");
case Expr::MLV_InvalidMessageExpression:
- Diag = diag::error_readonly_message_assignment;
+ DiagID = diag::error_readonly_message_assignment;
break;
case Expr::MLV_SubObjCPropertySetting:
- Diag = diag::error_no_subobject_property_setting;
+ DiagID = diag::error_no_subobject_property_setting;
break;
}
@@ -8586,9 +8722,9 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
if (Loc != OrigLoc)
Assign = SourceRange(OrigLoc, OrigLoc);
if (NeedType)
- S.Diag(Loc, Diag) << E->getType() << E->getSourceRange() << Assign;
+ S.Diag(Loc, DiagID) << E->getType() << E->getSourceRange() << Assign;
else
- S.Diag(Loc, Diag) << E->getSourceRange() << Assign;
+ S.Diag(Loc, DiagID) << E->getSourceRange() << Assign;
return true;
}
@@ -8754,6 +8890,7 @@ static QualType CheckCommaOperands(Sema &S, ExprResult &LHS, ExprResult &RHS,
/// doesn't need to call UsualUnaryConversions or UsualArithmeticConversions.
static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
ExprValueKind &VK,
+ ExprObjectKind &OK,
SourceLocation OpLoc,
bool IsInc, bool IsPrefix) {
if (Op->isTypeDependent())
@@ -8799,7 +8936,7 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
} else if (ResType->isPlaceholderType()) {
ExprResult PR = S.CheckPlaceholderExpr(Op);
if (PR.isInvalid()) return QualType();
- return CheckIncrementDecrementOperand(S, PR.get(), VK, OpLoc,
+ return CheckIncrementDecrementOperand(S, PR.get(), VK, OK, OpLoc,
IsInc, IsPrefix);
} else if (S.getLangOpts().AltiVec && ResType->isVectorType()) {
// OK! ( C/C++ Language Extensions for CBEA(Version 2.6) 10.3 )
@@ -8820,6 +8957,7 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
// operand.
if (IsPrefix && S.getLangOpts().CPlusPlus) {
VK = VK_LValue;
+ OK = Op->getObjectKind();
return ResType;
} else {
VK = VK_RValue;
@@ -9104,6 +9242,24 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
return Context.getPointerType(op->getType());
}
+static void RecordModifiableNonNullParam(Sema &S, const Expr *Exp) {
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp);
+ if (!DRE)
+ return;
+ const Decl *D = DRE->getDecl();
+ if (!D)
+ return;
+ const ParmVarDecl *Param = dyn_cast<ParmVarDecl>(D);
+ if (!Param)
+ return;
+ if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(Param->getDeclContext()))
+ if (!FD->hasAttr<NonNullAttr>() && !Param->hasAttr<NonNullAttr>())
+ return;
+ if (FunctionScopeInfo *FD = S.getCurFunction())
+ if (!FD->ModifiedNonNullParams.count(Param))
+ FD->ModifiedNonNullParams.insert(Param);
+}
+
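RecordModifiableNonNullParam only records state; the ModifiedNonNullParams set it fills is presumably consumed by the nonnull-related diagnostics elsewhere. A hypothetical example of what it tracks:

    __attribute__((nonnull)) void reset(int *p) {
      p = 0;            // assignment to a nonnull parameter: 'p' is recorded here
      if (p == 0)       // ...so a later "nonnull parameter compared to null" style
        return;         // warning can be suppressed for it (the consumer of the
    }                   // set is outside this hunk)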
/// CheckIndirectionOperand - Type check unary indirection (prefix '*').
static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
SourceLocation OpLoc) {
@@ -9164,8 +9320,7 @@ static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
return Result;
}
-static inline BinaryOperatorKind ConvertTokenKindToBinaryOpcode(
- tok::TokenKind Kind) {
+BinaryOperatorKind Sema::ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind) {
BinaryOperatorKind Opc;
switch (Kind) {
default: llvm_unreachable("Unknown binop!");
@@ -9334,6 +9489,16 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
ExprValueKind VK = VK_RValue;
ExprObjectKind OK = OK_Ordinary;
+ if (!getLangOpts().CPlusPlus) {
+ // C cannot handle TypoExpr nodes on either side of a binop because it
+ // doesn't handle dependent types properly, so make sure any TypoExprs have
+ // been dealt with before checking the operands.
+ LHS = CorrectDelayedTyposInExpr(LHSExpr);
+ RHS = CorrectDelayedTyposInExpr(RHSExpr);
+ if (!LHS.isUsable() || !RHS.isUsable())
+ return ExprError();
+ }
+
switch (Opc) {
case BO_Assign:
ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType());
@@ -9342,8 +9507,11 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
VK = LHS.get()->getValueKind();
OK = LHS.get()->getObjectKind();
}
- if (!ResultTy.isNull())
+ if (!ResultTy.isNull()) {
DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc);
+ DiagnoseSelfMove(LHS.get(), RHS.get(), OpLoc);
+ }
+ RecordModifiableNonNullParam(*this, LHS.get());
break;
case BO_PtrMemD:
case BO_PtrMemI:
@@ -9503,7 +9671,7 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
StringRef OpStr = isLeftComp ? LHSBO->getOpcodeStr() : RHSBO->getOpcodeStr();
SourceRange ParensRange = isLeftComp ?
SourceRange(LHSBO->getRHS()->getLocStart(), RHSExpr->getLocEnd())
- : SourceRange(LHSExpr->getLocStart(), RHSBO->getLHS()->getLocStart());
+ : SourceRange(LHSExpr->getLocStart(), RHSBO->getLHS()->getLocEnd());
Self.Diag(OpLoc, diag::warn_precedence_bitwise_rel)
<< DiagRange << BinaryOperator::getOpcodeStr(Opc) << OpStr;
@@ -9709,7 +9877,7 @@ static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc,
UnresolvedSet<16> Functions;
OverloadedOperatorKind OverOp
= BinaryOperator::getOverloadedOperator(Opc);
- if (Sc && OverOp != OO_None)
+ if (Sc && OverOp != OO_None && OverOp != OO_Equal)
S.LookupOverloadedOperatorName(OverOp, Sc, LHS->getType(),
RHS->getType(), Functions);
@@ -9808,7 +9976,8 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
case UO_PreDec:
case UO_PostInc:
case UO_PostDec:
- resultType = CheckIncrementDecrementOperand(*this, Input.get(), VK, OpLoc,
+ resultType = CheckIncrementDecrementOperand(*this, Input.get(), VK, OK,
+ OpLoc,
Opc == UO_PreInc ||
Opc == UO_PostInc,
Opc == UO_PreInc ||
@@ -9816,6 +9985,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
break;
case UO_AddrOf:
resultType = CheckAddressOfOperand(Input, OpLoc);
+ RecordModifiableNonNullParam(*this, InputExpr);
break;
case UO_Deref: {
Input = DefaultFunctionArrayLvalueConversion(Input.get());
@@ -10107,11 +10277,6 @@ Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
assert(!ExprNeedsCleanups && "cleanups within StmtExpr not correctly bound!");
PopExpressionEvaluationContext();
- bool isFileScope
- = (getCurFunctionOrMethodDecl() == nullptr) && (getCurBlock() == nullptr);
- if (isFileScope)
- return ExprError(Diag(LPLoc, diag::err_stmtexpr_file_scope));
-
// FIXME: there are a variety of strange constraints to enforce here, for
// example, it is not possible to goto into a stmt expression apparently.
// More semantic analysis is needed.
@@ -10998,7 +11163,7 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
PartialDiagnostic FDiag = PDiag(DiagKind);
if (Action == AA_Passing_CFAudited)
- FDiag << FirstType << SecondType << SrcExpr->getSourceRange();
+ FDiag << FirstType << SecondType << AA_Passing << SrcExpr->getSourceRange();
else
FDiag << FirstType << SecondType << Action << SrcExpr->getSourceRange();
@@ -11285,6 +11450,7 @@ Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
void Sema::PopExpressionEvaluationContext() {
ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back();
+ unsigned NumTypos = Rec.NumTypos;
if (!Rec.Lambdas.empty()) {
if (Rec.isUnevaluated() || Rec.Context == ConstantEvaluated) {
@@ -11301,19 +11467,14 @@ void Sema::PopExpressionEvaluationContext() {
// evaluate [...] a lambda-expression.
D = diag::err_lambda_in_constant_expression;
}
- for (unsigned I = 0, N = Rec.Lambdas.size(); I != N; ++I)
- Diag(Rec.Lambdas[I]->getLocStart(), D);
+ for (const auto *L : Rec.Lambdas)
+ Diag(L->getLocStart(), D);
} else {
// Mark the capture expressions odr-used. This was deferred
// during lambda expression creation.
- for (unsigned I = 0, N = Rec.Lambdas.size(); I != N; ++I) {
- LambdaExpr *Lambda = Rec.Lambdas[I];
- for (LambdaExpr::capture_init_iterator
- C = Lambda->capture_init_begin(),
- CEnd = Lambda->capture_init_end();
- C != CEnd; ++C) {
- MarkDeclarationsReferencedInExpr(*C);
- }
+ for (auto *Lambda : Rec.Lambdas) {
+ for (auto *C : Lambda->capture_inits())
+ MarkDeclarationsReferencedInExpr(C);
}
}
}
@@ -11337,6 +11498,12 @@ void Sema::PopExpressionEvaluationContext() {
// Pop the current expression evaluation context off the stack.
ExprEvalContexts.pop_back();
+
+ if (!ExprEvalContexts.empty())
+ ExprEvalContexts.back().NumTypos += NumTypos;
+ else
+ assert(NumTypos == 0 && "There are outstanding typos after popping the "
+ "last ExpressionEvaluationContextRecord");
}
void Sema::DiscardCleanupsInEvaluationContext() {
@@ -11385,7 +11552,8 @@ static bool IsPotentiallyEvaluatedContext(Sema &SemaRef) {
/// \brief Mark a function referenced, and check whether it is odr-used
/// (C++ [basic.def.odr]p2, C99 6.9p3)
-void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func) {
+void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
+ bool OdrUse) {
assert(Func && "No function?");
Func->setReferenced();
@@ -11484,6 +11652,8 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func) {
if (FPT && isUnresolvedExceptionSpec(FPT->getExceptionSpecType()))
ResolveExceptionSpec(Loc, FPT);
+ if (!OdrUse) return;
+
// Implicit instantiation of function templates and member functions of
// class templates.
if (Func->isImplicitlyInstantiable()) {
@@ -11636,7 +11806,7 @@ static DeclContext *getParentOfCapturingContextOrNull(DeclContext *DC, VarDecl *
const bool Diagnose, Sema &S) {
if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC) || isLambdaCallOperator(DC))
return getLambdaAwareParentOfDeclContext(DC);
- else {
+ else if (Var->hasLocalStorage()) {
if (Diagnose)
diagnoseUncapturableValueReference(S, Loc, Var, DC);
}
@@ -11665,13 +11835,10 @@ static bool isVariableCapturable(CapturingScopeInfo *CSI, VarDecl *Var,
return false;
}
- // Prohibit variably-modified types; they're difficult to deal with.
- if (Var->getType()->isVariablyModifiedType() && (IsBlock || IsLambda)) {
+ // Prohibit variably-modified types in blocks; they're difficult to deal with.
+ if (Var->getType()->isVariablyModifiedType() && IsBlock) {
if (Diagnose) {
- if (IsBlock)
- S.Diag(Loc, diag::err_ref_vm_type);
- else
- S.Diag(Loc, diag::err_lambda_capture_vm_type) << Var->getDeclName();
+ S.Diag(Loc, diag::err_ref_vm_type);
S.Diag(Var->getLocation(), diag::note_previous_decl)
<< Var->getDeclName();
}
@@ -11807,7 +11974,7 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
const bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
- const bool RefersToEnclosingLocal,
+ const bool RefersToCapturedVariable,
Sema &S) {
// By default, capture variables by reference.
@@ -11829,7 +11996,7 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
Field->setAccess(AS_private);
RD->addDecl(Field);
- CopyExpr = new (S.Context) DeclRefExpr(Var, RefersToEnclosingLocal,
+ CopyExpr = new (S.Context) DeclRefExpr(Var, RefersToCapturedVariable,
DeclRefType, VK_LValue, Loc);
Var->setReferenced(true);
Var->markUsed(S.Context);
@@ -11837,7 +12004,7 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
// Actually capture the variable.
if (BuildAndDiagnose)
- RSI->addCapture(Var, /*isBlock*/false, ByRef, RefersToEnclosingLocal, Loc,
+ RSI->addCapture(Var, /*isBlock*/false, ByRef, RefersToCapturedVariable, Loc,
SourceLocation(), CaptureType, CopyExpr);
@@ -11851,7 +12018,7 @@ static ExprResult addAsFieldToClosureType(Sema &S,
VarDecl *Var, QualType FieldType,
QualType DeclRefType,
SourceLocation Loc,
- bool RefersToEnclosingLocal) {
+ bool RefersToCapturedVariable) {
CXXRecordDecl *Lambda = LSI->Lambda;
// Build the non-static data member.
@@ -11880,7 +12047,7 @@ static ExprResult addAsFieldToClosureType(Sema &S,
// C++ [expr.prim.lambda]p12:
// An entity captured by a lambda-expression is odr-used (3.2) in
// the scope containing the lambda-expression.
- Expr *Ref = new (S.Context) DeclRefExpr(Var, RefersToEnclosingLocal,
+ Expr *Ref = new (S.Context) DeclRefExpr(Var, RefersToCapturedVariable,
DeclRefType, VK_LValue, Loc);
Var->setReferenced(true);
Var->markUsed(S.Context);
@@ -11974,7 +12141,7 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
const bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
- const bool RefersToEnclosingLocal,
+ const bool RefersToCapturedVariable,
const Sema::TryCaptureKind Kind,
SourceLocation EllipsisLoc,
const bool IsTopScope,
@@ -12048,7 +12215,7 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
if (BuildAndDiagnose) {
ExprResult Result = addAsFieldToClosureType(S, LSI, Var,
CaptureType, DeclRefType, Loc,
- RefersToEnclosingLocal);
+ RefersToCapturedVariable);
if (!Result.isInvalid())
CopyExpr = Result.get();
}
@@ -12069,20 +12236,19 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
// Add the capture.
if (BuildAndDiagnose)
- LSI->addCapture(Var, /*IsBlock=*/false, ByRef, RefersToEnclosingLocal,
+ LSI->addCapture(Var, /*IsBlock=*/false, ByRef, RefersToCapturedVariable,
Loc, EllipsisLoc, CaptureType, CopyExpr);
return true;
}
-
bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation ExprLoc,
TryCaptureKind Kind, SourceLocation EllipsisLoc,
bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt) {
- bool Nested = false;
+ bool Nested = Var->isInitCapture();
DeclContext *DC = CurContext;
const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
@@ -12100,8 +12266,13 @@ bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation ExprLoc,
// If the variable is declared in the current context (and is not an
// init-capture), there is no need to capture it.
- if (!Var->isInitCapture() && Var->getDeclContext() == DC) return true;
- if (!Var->hasLocalStorage()) return true;
+ if (!Nested && Var->getDeclContext() == DC) return true;
+
+  // Capture global variables if it is required to use a private copy of this
+ // variable.
+ bool IsGlobal = !Var->hasLocalStorage();
+ if (IsGlobal && !(LangOpts.OpenMP && IsOpenMPCapturedVar(Var)))
+ return true;
// Walk up the stack to determine whether we can capture the variable,
// performing the "simple" checks that don't depend on type. We stop when
@@ -12122,8 +12293,17 @@ bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation ExprLoc,
ExprLoc,
BuildAndDiagnose,
*this);
- if (!ParentDC) return true;
-
+ // We need to check for the parent *first* because, if we *have*
+ // private-captured a global variable, we need to recursively capture it in
+ // intermediate blocks, lambdas, etc.
+ if (!ParentDC) {
+ if (IsGlobal) {
+ FunctionScopesIndex = MaxFunctionScopesIndex - 1;
+ break;
+ }
+ return true;
+ }
+
FunctionScopeInfo *FSI = FunctionScopes[FunctionScopesIndex];
CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FSI);
@@ -12212,14 +12392,37 @@ bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation ExprLoc,
break;
case Type::VariableArray: {
// Losing element qualification here is fine.
- const VariableArrayType *Vat = cast<VariableArrayType>(Ty);
+ const VariableArrayType *VAT = cast<VariableArrayType>(Ty);
// Unknown size indication requires no size computation.
// Otherwise, evaluate and record it.
- if (Expr *Size = Vat->getSizeExpr()) {
- MarkDeclarationsReferencedInExpr(Size);
+ if (auto Size = VAT->getSizeExpr()) {
+ if (!CSI->isVLATypeCaptured(VAT)) {
+ RecordDecl *CapRecord = nullptr;
+ if (auto LSI = dyn_cast<LambdaScopeInfo>(CSI)) {
+ CapRecord = LSI->Lambda;
+ } else if (auto CRSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) {
+ CapRecord = CRSI->TheRecordDecl;
+ }
+ if (CapRecord) {
+ auto ExprLoc = Size->getExprLoc();
+ auto SizeType = Context.getSizeType();
+ // Build the non-static data member.
+ auto Field = FieldDecl::Create(
+ Context, CapRecord, ExprLoc, ExprLoc,
+ /*Id*/ nullptr, SizeType, /*TInfo*/ nullptr,
+ /*BW*/ nullptr, /*Mutable*/ false,
+ /*InitStyle*/ ICIS_NoInit);
+ Field->setImplicit(true);
+ Field->setAccess(AS_private);
+ Field->setCapturedVLAType(VAT);
+ CapRecord->addDecl(Field);
+
+ CSI->addVLATypeCapture(ExprLoc, SizeType);
+ }
+ }
}
- QTy = Vat->getElementType();
+ QTy = VAT->getElementType();
break;
}
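The new VariableArray handling materializes the array bound as an implicit size_t field of the closure or captured-region record. A sketch of the code this supports, assuming Clang's VLA extension in C++:

    void f(int n) {
      int vla[n];                              // variably modified type
      auto g = [&]() { return sizeof(vla); };  // the bound of 'vla' is captured as an
      (void)g();                               // implicit size_t member of the closure
    }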
case Type::FunctionProto:
@@ -12326,6 +12529,14 @@ bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
DeclRefType, nullptr);
}
+bool Sema::NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc) {
+ QualType CaptureType;
+ QualType DeclRefType;
+ return !tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(),
+ /*BuildAndDiagnose=*/false, CaptureType,
+ DeclRefType, nullptr);
+}
+
QualType Sema::getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc) {
QualType CaptureType;
QualType DeclRefType;
@@ -12388,6 +12599,8 @@ void Sema::UpdateMarkingForLValueToRValue(Expr *E) {
}
ExprResult Sema::ActOnConstantExpression(ExprResult Res) {
+ Res = CorrectDelayedTyposInExpr(Res);
+
if (!Res.isUsable())
return Res;
@@ -12412,7 +12625,7 @@ void Sema::CleanupVarDeclMarking() {
Var = cast<VarDecl>(ME->getMemberDecl());
Loc = ME->getMemberLoc();
} else {
- llvm_unreachable("Unexpcted expression");
+ llvm_unreachable("Unexpected expression");
}
MarkVarDeclODRUsed(Var, Loc, *this,
@@ -12429,6 +12642,9 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
"Invalid Expr argument to DoMarkVarDeclReferenced");
Var->setReferenced();
+ TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind();
+ bool MarkODRUsed = true;
+
// If the context is not potentially evaluated, this is not an odr-use and
// does not trigger instantiation.
if (!IsPotentiallyEvaluatedContext(SemaRef)) {
@@ -12443,25 +12659,29 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
// arguments, where local variables can't be used.
const bool RefersToEnclosingScope =
(SemaRef.CurContext != Var->getDeclContext() &&
- Var->getDeclContext()->isFunctionOrMethod() &&
- Var->hasLocalStorage());
- if (!RefersToEnclosingScope)
- return;
-
- if (LambdaScopeInfo *const LSI = SemaRef.getCurLambda()) {
- // If a variable could potentially be odr-used, defer marking it so
- // until we finish analyzing the full expression for any lvalue-to-rvalue
- // or discarded value conversions that would obviate odr-use.
- // Add it to the list of potential captures that will be analyzed
- // later (ActOnFinishFullExpr) for eventual capture and odr-use marking
- // unless the variable is a reference that was initialized by a constant
- // expression (this will never need to be captured or odr-used).
- assert(E && "Capture variable should be used in an expression.");
- if (!Var->getType()->isReferenceType() ||
- !IsVariableNonDependentAndAConstantExpression(Var, SemaRef.Context))
- LSI->addPotentialCapture(E->IgnoreParens());
+ Var->getDeclContext()->isFunctionOrMethod() && Var->hasLocalStorage());
+ if (RefersToEnclosingScope) {
+ if (LambdaScopeInfo *const LSI = SemaRef.getCurLambda()) {
+ // If a variable could potentially be odr-used, defer marking it so
+      // until we finish analyzing the full expression for any lvalue-to-rvalue
+      // or discarded value conversions that would obviate odr-use.
+ // Add it to the list of potential captures that will be analyzed
+ // later (ActOnFinishFullExpr) for eventual capture and odr-use marking
+ // unless the variable is a reference that was initialized by a constant
+ // expression (this will never need to be captured or odr-used).
+ assert(E && "Capture variable should be used in an expression.");
+ if (!Var->getType()->isReferenceType() ||
+ !IsVariableNonDependentAndAConstantExpression(Var, SemaRef.Context))
+ LSI->addPotentialCapture(E->IgnoreParens());
+ }
}
- return;
+
+ if (!isTemplateInstantiation(TSK))
+ return;
+
+ // Instantiate, but do not mark as odr-used, variable templates.
+ MarkODRUsed = false;
}
VarTemplateSpecializationDecl *VarSpec =
@@ -12473,7 +12693,6 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
// templates of class templates, and variable template specializations. Delay
// instantiations of variable templates, except for those that could be used
// in a constant expression.
- TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind();
if (isTemplateInstantiation(TSK)) {
bool TryInstantiating = TSK == TSK_ImplicitInstantiation;
@@ -12513,6 +12732,8 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
}
}
+  if (!MarkODRUsed) return;
+
// Per C++11 [basic.def.odr], a variable is odr-used "unless it satisfies
// the requirements for appearing in a constant expression (5.19) and, if
// it is an object, the lvalue-to-rvalue conversion (4.1)
@@ -12555,6 +12776,10 @@ static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc,
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ME->getMemberDecl());
if (!MD)
return;
+ // Only attempt to devirtualize if this is truly a virtual call.
+ bool IsVirtualCall = MD->isVirtual() && !ME->hasQualifier();
+ if (!IsVirtualCall)
+ return;
const Expr *Base = ME->getBase();
const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
if (!MostDerivedClassDecl)
@@ -12602,14 +12827,14 @@ void Sema::MarkMemberReferenced(MemberExpr *E) {
/// normal expression which refers to a variable.
void Sema::MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse) {
if (OdrUse) {
- if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (auto *VD = dyn_cast<VarDecl>(D)) {
MarkVariableReferenced(Loc, VD);
return;
}
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- MarkFunctionReferenced(Loc, FD);
- return;
- }
+ }
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ MarkFunctionReferenced(Loc, FD, OdrUse);
+ return;
}
D->setReferenced();
}
@@ -12935,6 +13160,7 @@ ExprResult Sema::CheckBooleanCondition(Expr *E, SourceLocation Loc) {
<< T << E->getSourceRange();
return ExprError();
}
+ CheckBoolLikeConversion(E, Loc);
}
return E;
@@ -13306,6 +13532,39 @@ ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
<< VD << E->getSourceRange();
return ExprError();
}
+ if (const FunctionProtoType *FT = Type->getAs<FunctionProtoType>()) {
+ // We must match the FunctionDecl's type to the hack introduced in
+ // RebuildUnknownAnyExpr::VisitCallExpr to vararg functions of unknown
+ // type. See the lengthy commentary in that routine.
+ QualType FDT = FD->getType();
+ const FunctionType *FnType = FDT->castAs<FunctionType>();
+ const FunctionProtoType *Proto = dyn_cast_or_null<FunctionProtoType>(FnType);
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
+ if (DRE && Proto && Proto->getParamTypes().empty() && Proto->isVariadic()) {
+ SourceLocation Loc = FD->getLocation();
+ FunctionDecl *NewFD = FunctionDecl::Create(FD->getASTContext(),
+ FD->getDeclContext(),
+ Loc, Loc, FD->getNameInfo().getName(),
+ DestType, FD->getTypeSourceInfo(),
+ SC_None, false/*isInlineSpecified*/,
+ FD->hasPrototype(),
+ false/*isConstexprSpecified*/);
+
+ if (FD->getQualifier())
+ NewFD->setQualifierInfo(FD->getQualifierLoc());
+
+ SmallVector<ParmVarDecl*, 16> Params;
+ for (const auto &AI : FT->param_types()) {
+ ParmVarDecl *Param =
+ S.BuildParmVarDeclForTypedef(FD, Loc, AI);
+ Param->setScopeInfo(0, Params.size());
+ Params.push_back(Param);
+ }
+ NewFD->setParams(Params);
+ DRE->setDecl(NewFD);
+ VD = DRE->getDecl();
+ }
+ }
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (MD->isInstance()) {
@@ -13431,6 +13690,15 @@ static ExprResult diagnoseUnknownAnyExpr(Sema &S, Expr *E) {
/// Check for operands with placeholder types and complain if found.
/// Returns true if there was an error and no recovery was possible.
ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
+ if (!getLangOpts().CPlusPlus) {
+ // C cannot handle TypoExpr nodes on either side of a binop because it
+ // doesn't handle dependent types properly, so make sure any TypoExprs have
+ // been dealt with before checking the operands.
+ ExprResult Result = CorrectDelayedTyposInExpr(E);
+ if (!Result.isUsable()) return ExprError();
+ E = Result.get();
+ }
+
const BuiltinType *placeholderType = E->getType()->getAsPlaceholderType();
if (!placeholderType) return E;
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index 0dabdca29553..422398ebeb1e 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Sema/SemaInternal.h"
+#include "TreeTransform.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
@@ -67,6 +68,7 @@ ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
break;
case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
llvm_unreachable("Nested name specifier is not a type for inheriting ctor");
@@ -198,6 +200,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
QualType T = Context.getTypeDeclType(Type);
+ MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
if (SearchType.isNull() || SearchType->isDependentType() ||
Context.hasSameUnqualifiedType(T, SearchType)) {
@@ -354,6 +357,7 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
return true;
case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
return false;
@@ -380,6 +384,9 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
return ExprError();
+ if (T->isVariablyModifiedType())
+ return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid) << T);
+
return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), Operand,
SourceRange(TypeidLoc, RParenLoc));
}
@@ -389,6 +396,7 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *E,
SourceLocation RParenLoc) {
+ bool WasEvaluated = false;
if (E && !E->isTypeDependent()) {
if (E->getType()->isPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(E);
@@ -418,6 +426,7 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
// We require a vtable to query the type at run time.
MarkVTableUsed(TypeidLoc, RecordD);
+ WasEvaluated = true;
}
}
@@ -434,6 +443,18 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
}
}
+ if (E->getType()->isVariablyModifiedType())
+ return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid)
+ << E->getType());
+ else if (ActiveTemplateInstantiations.empty() &&
+ E->HasSideEffects(Context, WasEvaluated)) {
+ // The expression operand for typeid is in an unevaluated expression
+ // context, so side effects could result in unintended consequences.
+ Diag(E->getExprLoc(), WasEvaluated
+ ? diag::warn_side_effects_typeid
+ : diag::warn_side_effects_unevaluated_context);
+ }
+
return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), E,
SourceRange(TypeidLoc, RParenLoc));
}
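Taken together, the typeid changes reject variably modified operands and warn when a side-effecting operand is unevaluated. Roughly (VLAs in C++ are a Clang extension; wording paraphrased):

    #include <typeinfo>

    void f(int n) {
      int a[n];
      (void)typeid(a);     // error: typeid of a variably modified type
      int i = 0;
      (void)typeid(i++);   // warning: 'i' is not polymorphic, so the operand is
                           // unevaluated and the increment never happens
    }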
@@ -580,15 +601,15 @@ Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
bool IsThrownVarInScope = false;
if (Ex) {
// C++0x [class.copymove]p31:
- // When certain criteria are met, an implementation is allowed to omit the
+ // When certain criteria are met, an implementation is allowed to omit the
// copy/move construction of a class object [...]
//
- // - in a throw-expression, when the operand is the name of a
+ // - in a throw-expression, when the operand is the name of a
// non-volatile automatic object (other than a function or catch-
- // clause parameter) whose scope does not extend beyond the end of the
- // innermost enclosing try-block (if there is one), the copy/move
- // operation from the operand to the exception object (15.1) can be
- // omitted by constructing the automatic object directly into the
+ // clause parameter) whose scope does not extend beyond the end of the
+ // innermost enclosing try-block (if there is one), the copy/move
+ // operation from the operand to the exception object (15.1) can be
+ // omitted by constructing the automatic object directly into the
// exception object
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Ex->IgnoreParens()))
if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
@@ -1083,7 +1104,7 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(I).Arr;
if (Expr *NumElts = (Expr *)Array.NumElts) {
if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
- if (getLangOpts().CPlusPlus1y) {
+ if (getLangOpts().CPlusPlus14) {
// C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator
// shall be a converted constant expression (5.19) of type std::size_t
// and shall evaluate to a strictly positive value.
@@ -1257,7 +1278,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// std::size_t.
if (ArraySize && !ArraySize->isTypeDependent()) {
ExprResult ConvertedSize;
- if (getLangOpts().CPlusPlus1y) {
+ if (getLangOpts().CPlusPlus14) {
assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
ConvertedSize = PerformImplicitConversion(ArraySize, Context.getSizeType(),
@@ -2072,19 +2093,18 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
if (!getLangOpts().CPlusPlus11) {
BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
assert(StdBadAlloc && "Must have std::bad_alloc declared");
- EPI.ExceptionSpecType = EST_Dynamic;
- EPI.NumExceptions = 1;
- EPI.Exceptions = &BadAllocType;
+ EPI.ExceptionSpec.Type = EST_Dynamic;
+ EPI.ExceptionSpec.Exceptions = llvm::makeArrayRef(BadAllocType);
}
} else {
- EPI.ExceptionSpecType = getLangOpts().CPlusPlus11 ?
- EST_BasicNoexcept : EST_DynamicNone;
+ EPI.ExceptionSpec =
+ getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
}
QualType Params[] = { Param1, Param2 };
QualType FnType = Context.getFunctionType(
- Return, ArrayRef<QualType>(Params, NumParams), EPI);
+ Return, llvm::makeArrayRef(Params, NumParams), EPI);
FunctionDecl *Alloc =
FunctionDecl::Create(Context, GlobalCtx, SourceLocation(),
SourceLocation(), Name,
@@ -2102,7 +2122,7 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
SC_None, nullptr);
ParamDecls[I]->setImplicit();
}
- Alloc->setParams(ArrayRef<ParmVarDecl*>(ParamDecls, NumParams));
+ Alloc->setParams(llvm::makeArrayRef(ParamDecls, NumParams));
Context.getTranslationUnitDecl()->addDecl(Alloc);
IdResolver.tryAddTopLevelDecl(Alloc, Name);
@@ -2622,8 +2642,8 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// Do no conversion if dealing with ... for the first conversion.
if (!ICS.UserDefined.EllipsisConversion) {
// If the user-defined conversion is specified by a constructor, the
- // initial standard conversion sequence converts the source type to the
- // type required by the argument of the constructor
+ // initial standard conversion sequence converts the source type to
+ // the type required by the argument of the constructor
BeforeToType = Ctor->getParamDecl(0)->getType().getNonReferenceType();
}
}
@@ -2739,15 +2759,19 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// Perform the first implicit conversion.
switch (SCS.First) {
case ICK_Identity:
- // Nothing to do.
+ if (const AtomicType *FromAtomic = FromType->getAs<AtomicType>()) {
+ FromType = FromAtomic->getValueType().getUnqualifiedType();
+ From = ImplicitCastExpr::Create(Context, FromType, CK_AtomicToNonAtomic,
+ From, /*BasePath=*/nullptr, VK_RValue);
+ }
break;
case ICK_Lvalue_To_Rvalue: {
assert(From->getObjectKind() != OK_ObjCProperty);
- FromType = FromType.getUnqualifiedType();
ExprResult FromRes = DefaultLvalueConversion(From);
assert(!FromRes.isInvalid() && "Can't perform deduced conversion?!");
From = FromRes.get();
+ FromType = From->getType();
break;
}
@@ -2770,10 +2794,29 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// Perform the second implicit conversion
switch (SCS.Second) {
case ICK_Identity:
- // If both sides are functions (or pointers/references to them), there could
- // be incompatible exception declarations.
- if (CheckExceptionSpecCompatibility(From, ToType))
- return ExprError();
+ // C++ [except.spec]p5:
+ // [For] assignment to and initialization of pointers to functions,
+ // pointers to member functions, and references to functions: the
+ // target entity shall allow at least the exceptions allowed by the
+ // source value in the assignment or initialization.
+ switch (Action) {
+ case AA_Assigning:
+ case AA_Initializing:
+ // Note, function argument passing and returning are initialization.
+ case AA_Passing:
+ case AA_Returning:
+ case AA_Sending:
+ case AA_Passing_CFAudited:
+ if (CheckExceptionSpecCompatibility(From, ToType))
+ return ExprError();
+ break;
+
+ case AA_Casting:
+ case AA_Converting:
+ // Casts and implicit conversions are not initialization, so are not
+ // checked for exception specification mismatches.
+ break;
+ }
// Nothing else to do.
break;
@@ -2899,6 +2942,14 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
return ExprError();
if (CheckExceptionSpecCompatibility(From, ToType))
return ExprError();
+
+ // We may not have been able to figure out what this member pointer resolved
+  // to up until this exact point. Attempt to lock in its inheritance model.
+ QualType FromType = From->getType();
+ if (FromType->isMemberPointerType())
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft())
+ RequireCompleteType(From->getExprLoc(), FromType, 0);
+
From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK)
.get();
break;
@@ -3642,12 +3693,13 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
if (T->isObjectType() || T->isFunctionType())
T = S.Context.getRValueReferenceType(T);
OpaqueArgExprs.push_back(
- OpaqueValueExpr(Args[I]->getTypeLoc().getLocStart(),
+ OpaqueValueExpr(Args[I]->getTypeLoc().getLocStart(),
T.getNonLValueExprType(S.Context),
Expr::getValueKindForType(T)));
- ArgExprs.push_back(&OpaqueArgExprs.back());
}
-
+ for (Expr &E : OpaqueArgExprs)
+ ArgExprs.push_back(&E);
+
// Perform the initialization in an unevaluated context within a SFINAE
// trap at translation unit scope.
EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
@@ -4549,10 +4601,14 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// the usual arithmetic conversions are performed to bring them to a
// common type, and the result is of that type.
if (LTy->isArithmeticType() && RTy->isArithmeticType()) {
- UsualArithmeticConversions(LHS, RHS);
+ QualType ResTy = UsualArithmeticConversions(LHS, RHS);
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
- return LHS.get()->getType();
+
+ LHS = ImpCastExprToType(LHS.get(), ResTy, PrepareScalarCast(LHS, ResTy));
+ RHS = ImpCastExprToType(RHS.get(), ResTy, PrepareScalarCast(RHS, ResTy));
+
+ return ResTy;
}
// -- The second and third operands have pointer type, or one has pointer
@@ -4993,9 +5049,8 @@ Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
if (!ExprNeedsCleanups)
return SubExpr;
- ArrayRef<ExprWithCleanups::CleanupObject> Cleanups
- = llvm::makeArrayRef(ExprCleanupObjects.begin() + FirstCleanup,
- ExprCleanupObjects.size() - FirstCleanup);
+ auto Cleanups = llvm::makeArrayRef(ExprCleanupObjects.begin() + FirstCleanup,
+ ExprCleanupObjects.size() - FirstCleanup);
Expr *E = ExprWithCleanups::Create(Context, SubExpr, Cleanups);
DiscardCleanupsInEvaluationContext();
@@ -5231,7 +5286,7 @@ Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
OperatorArrows.push_back(OpCall->getDirectCallee());
BaseType = Base->getType();
CanQualType CBaseType = Context.getCanonicalType(BaseType);
- if (!CTypes.insert(CBaseType)) {
+ if (!CTypes.insert(CBaseType).second) {
Diag(OpLoc, diag::err_operator_arrow_circular) << StartingType;
noteOperatorArrows(*this, OperatorArrows);
return ExprError();
@@ -5581,7 +5636,8 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
return ExprError();
- QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc(),
+ false);
TypeLocBuilder TLB;
DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
@@ -5649,6 +5705,13 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen) {
+ if (ActiveTemplateInstantiations.empty() &&
+ Operand->HasSideEffects(Context, false)) {
+ // The expression operand for noexcept is in an unevaluated expression
+ // context, so side effects could result in unintended consequences.
+ Diag(Operand->getExprLoc(), diag::warn_side_effects_unevaluated_context);
+ }
+
CanThrowResult CanThrow = canThrow(Operand);
return new (Context)
CXXNoexceptExpr(Context.BoolTy, Operand, CanThrow, KeyLoc, RParen);
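The same unevaluated-operand warning is wired up for noexcept, for example:

    int i = 0;
    bool b = noexcept(i++);   // warning: the operand of noexcept is unevaluated,
                              // so the increment has no effect; b is simply true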
@@ -5905,6 +5968,284 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
CurrentLSI->clearPotentialCaptures();
}
+static ExprResult attemptRecovery(Sema &SemaRef,
+ const TypoCorrectionConsumer &Consumer,
+ TypoCorrection TC) {
+ LookupResult R(SemaRef, Consumer.getLookupResult().getLookupNameInfo(),
+ Consumer.getLookupResult().getLookupKind());
+ const CXXScopeSpec *SS = Consumer.getSS();
+ CXXScopeSpec NewSS;
+
+  // Use an appropriate CXXScopeSpec for building the expr.
+ if (auto *NNS = TC.getCorrectionSpecifier())
+ NewSS.MakeTrivial(SemaRef.Context, NNS, TC.getCorrectionRange());
+ else if (SS && !TC.WillReplaceSpecifier())
+ NewSS = *SS;
+
+ if (auto *ND = TC.getCorrectionDecl()) {
+ R.setLookupName(ND->getDeclName());
+ R.addDecl(ND);
+ if (ND->isCXXClassMember()) {
+ // Figure out the correct naming class to add to the LookupResult.
+ CXXRecordDecl *Record = nullptr;
+ if (auto *NNS = TC.getCorrectionSpecifier())
+ Record = NNS->getAsType()->getAsCXXRecordDecl();
+ if (!Record)
+ Record =
+ dyn_cast<CXXRecordDecl>(ND->getDeclContext()->getRedeclContext());
+ if (Record)
+ R.setNamingClass(Record);
+
+ // Detect and handle the case where the decl might be an implicit
+ // member.
+ bool MightBeImplicitMember;
+ if (!Consumer.isAddressOfOperand())
+ MightBeImplicitMember = true;
+ else if (!NewSS.isEmpty())
+ MightBeImplicitMember = false;
+ else if (R.isOverloadedResult())
+ MightBeImplicitMember = false;
+ else if (R.isUnresolvableResult())
+ MightBeImplicitMember = true;
+ else
+ MightBeImplicitMember = isa<FieldDecl>(ND) ||
+ isa<IndirectFieldDecl>(ND) ||
+ isa<MSPropertyDecl>(ND);
+
+ if (MightBeImplicitMember)
+ return SemaRef.BuildPossibleImplicitMemberExpr(
+ NewSS, /*TemplateKWLoc*/ SourceLocation(), R,
+ /*TemplateArgs*/ nullptr);
+ } else if (auto *Ivar = dyn_cast<ObjCIvarDecl>(ND)) {
+ return SemaRef.LookupInObjCMethod(R, Consumer.getScope(),
+ Ivar->getIdentifier());
+ }
+ }
+
+ return SemaRef.BuildDeclarationNameExpr(NewSS, R, /*NeedsADL*/ false,
+ /*AcceptInvalidDecl*/ true);
+}
+
+namespace {
+class FindTypoExprs : public RecursiveASTVisitor<FindTypoExprs> {
+ llvm::SmallSetVector<TypoExpr *, 2> &TypoExprs;
+
+public:
+ explicit FindTypoExprs(llvm::SmallSetVector<TypoExpr *, 2> &TypoExprs)
+ : TypoExprs(TypoExprs) {}
+ bool VisitTypoExpr(TypoExpr *TE) {
+ TypoExprs.insert(TE);
+ return true;
+ }
+};
+
+class TransformTypos : public TreeTransform<TransformTypos> {
+ typedef TreeTransform<TransformTypos> BaseTransform;
+
+ llvm::function_ref<ExprResult(Expr *)> ExprFilter;
+ llvm::SmallSetVector<TypoExpr *, 2> TypoExprs, AmbiguousTypoExprs;
+ llvm::SmallDenseMap<TypoExpr *, ExprResult, 2> TransformCache;
+ llvm::SmallDenseMap<OverloadExpr *, Expr *, 4> OverloadResolution;
+
+ /// \brief Emit diagnostics for all of the TypoExprs encountered.
+ /// If the TypoExprs were successfully corrected, then the diagnostics should
+ /// suggest the corrections. Otherwise the diagnostics will not suggest
+ /// anything (having been passed an empty TypoCorrection).
+ void EmitAllDiagnostics() {
+ for (auto E : TypoExprs) {
+ TypoExpr *TE = cast<TypoExpr>(E);
+ auto &State = SemaRef.getTypoExprState(TE);
+ if (State.DiagHandler) {
+ TypoCorrection TC = State.Consumer->getCurrentCorrection();
+ ExprResult Replacement = TransformCache[TE];
+
+ // Extract the NamedDecl from the transformed TypoExpr and add it to the
+ // TypoCorrection, replacing the existing decls. This ensures the right
+ // NamedDecl is used in diagnostics e.g. in the case where overload
+ // resolution was used to select one from several possible decls that
+ // had been stored in the TypoCorrection.
+ if (auto *ND = getDeclFromExpr(
+ Replacement.isInvalid() ? nullptr : Replacement.get()))
+ TC.setCorrectionDecl(ND);
+
+ State.DiagHandler(TC);
+ }
+ SemaRef.clearDelayedTypo(TE);
+ }
+ }
+
+ /// \brief If corrections for the first TypoExpr have been exhausted for a
+ /// given combination of the other TypoExprs, retry those corrections against
+ /// the next combination of substitutions for the other TypoExprs by advancing
+ /// to the next potential correction of the second TypoExpr. For the second
+ /// and subsequent TypoExprs, if its stream of corrections has been exhausted,
+ /// the stream is reset and the next TypoExpr's stream is advanced by one (a
+ /// TypoExpr's correction stream is advanced by removing the TypoExpr from the
+  /// TransformCache). Returns true if there are still any untried combinations
+ /// of corrections.
+ bool CheckAndAdvanceTypoExprCorrectionStreams() {
+ for (auto TE : TypoExprs) {
+ auto &State = SemaRef.getTypoExprState(TE);
+ TransformCache.erase(TE);
+ if (!State.Consumer->finished())
+ return true;
+ State.Consumer->resetCorrectionStream();
+ }
+ return false;
+ }
+
+ NamedDecl *getDeclFromExpr(Expr *E) {
+ if (auto *OE = dyn_cast_or_null<OverloadExpr>(E))
+ E = OverloadResolution[OE];
+
+ if (!E)
+ return nullptr;
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E))
+ return DRE->getDecl();
+ if (auto *ME = dyn_cast<MemberExpr>(E))
+ return ME->getMemberDecl();
+    // FIXME: Add any other expr types that could be seen by the delayed typo
+ // correction TreeTransform for which the corresponding TypoCorrection could
+ // contain multiple decls.
+ return nullptr;
+ }
+
+ ExprResult TryTransform(Expr *E) {
+ Sema::SFINAETrap Trap(SemaRef);
+ ExprResult Res = TransformExpr(E);
+ if (Trap.hasErrorOccurred() || Res.isInvalid())
+ return ExprError();
+
+ return ExprFilter(Res.get());
+ }
+
+public:
+ TransformTypos(Sema &SemaRef, llvm::function_ref<ExprResult(Expr *)> Filter)
+ : BaseTransform(SemaRef), ExprFilter(Filter) {}
+
+ ExprResult RebuildCallExpr(Expr *Callee, SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc,
+ Expr *ExecConfig = nullptr) {
+ auto Result = BaseTransform::RebuildCallExpr(Callee, LParenLoc, Args,
+ RParenLoc, ExecConfig);
+ if (auto *OE = dyn_cast<OverloadExpr>(Callee)) {
+ if (Result.isUsable()) {
+ Expr *ResultCall = Result.get();
+ if (auto *BE = dyn_cast<CXXBindTemporaryExpr>(ResultCall))
+ ResultCall = BE->getSubExpr();
+ if (auto *CE = dyn_cast<CallExpr>(ResultCall))
+ OverloadResolution[OE] = CE->getCallee();
+ }
+ }
+ return Result;
+ }
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) { return Owned(E); }
+
+ ExprResult TransformOpaqueValueExpr(OpaqueValueExpr *E) {
+ if (Expr *SE = E->getSourceExpr())
+ return TransformExpr(SE);
+ return BaseTransform::TransformOpaqueValueExpr(E);
+ }
+
+ ExprResult Transform(Expr *E) {
+ ExprResult Res;
+ while (true) {
+ Res = TryTransform(E);
+
+ // Exit if either the transform was valid or if there were no TypoExprs
+      // to transform that still have any untried correction candidates.
+ if (!Res.isInvalid() ||
+ !CheckAndAdvanceTypoExprCorrectionStreams())
+ break;
+ }
+
+ // Ensure none of the TypoExprs have multiple typo correction candidates
+ // with the same edit length that pass all the checks and filters.
+ // TODO: Properly handle various permutations of possible corrections when
+ // there is more than one potentially ambiguous typo correction.
+ while (!AmbiguousTypoExprs.empty()) {
+ auto TE = AmbiguousTypoExprs.back();
+ auto Cached = TransformCache[TE];
+ AmbiguousTypoExprs.pop_back();
+ TransformCache.erase(TE);
+ if (!TryTransform(E).isInvalid()) {
+ SemaRef.getTypoExprState(TE).Consumer->resetCorrectionStream();
+ TransformCache.erase(TE);
+ Res = ExprError();
+ break;
+ } else
+ TransformCache[TE] = Cached;
+ }
+
+ // Ensure that all of the TypoExprs within the current Expr have been found.
+ if (!Res.isUsable())
+ FindTypoExprs(TypoExprs).TraverseStmt(E);
+
+ EmitAllDiagnostics();
+
+ return Res;
+ }
+
+ ExprResult TransformTypoExpr(TypoExpr *E) {
+ // If the TypoExpr hasn't been seen before, record it. Otherwise, return the
+ // cached transformation result if there is one and the TypoExpr isn't the
+ // first one that was encountered.
+ auto &CacheEntry = TransformCache[E];
+ if (!TypoExprs.insert(E) && !CacheEntry.isUnset()) {
+ return CacheEntry;
+ }
+
+ auto &State = SemaRef.getTypoExprState(E);
+ assert(State.Consumer && "Cannot transform a cleared TypoExpr");
+
+ // For the first TypoExpr or an uncached TypoExpr, find the next likely
+ // typo correction and return it.
+ while (TypoCorrection TC = State.Consumer->getNextCorrection()) {
+ ExprResult NE = State.RecoveryHandler ?
+ State.RecoveryHandler(SemaRef, E, TC) :
+ attemptRecovery(SemaRef, *State.Consumer, TC);
+ if (!NE.isInvalid()) {
+ // Check whether there may be a second viable correction with the same
+ // edit distance; if so, remember this TypoExpr may have an ambiguous
+ // correction so it can be more thoroughly vetted later.
+ TypoCorrection Next;
+ if ((Next = State.Consumer->peekNextCorrection()) &&
+ Next.getEditDistance(false) == TC.getEditDistance(false)) {
+ AmbiguousTypoExprs.insert(E);
+ } else {
+ AmbiguousTypoExprs.remove(E);
+ }
+ assert(!NE.isUnset() &&
+ "Typo was transformed into a valid-but-null ExprResult");
+ return CacheEntry = NE;
+ }
+ }
+ return CacheEntry = ExprError();
+ }
+};
+}
+
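
A hypothetical input (not from this patch) showing the situation that TransformTypoExpr guards against with peekNextCorrection: the misspelling is one edit away from two different declarations, so the correction is flagged as potentially ambiguous and re-vetted before any fix is committed.

    int few, foe;
    int n = fee;  // error: 'fee' is equidistant from 'few' and 'foe', so the
                  // correction is treated as ambiguous rather than applied
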
+ExprResult Sema::CorrectDelayedTyposInExpr(
+ Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) {
+ // If the current evaluation context indicates there are uncorrected typos
+ // and the current expression is still dependent (so it may contain TypoExpr
+ // nodes), try to resolve any typos in the expression.
+ if (E && !ExprEvalContexts.empty() && ExprEvalContexts.back().NumTypos &&
+ (E->isTypeDependent() || E->isValueDependent() ||
+ E->isInstantiationDependent())) {
+ auto TyposResolved = DelayedTypos.size();
+ auto Result = TransformTypos(*this, Filter).Transform(E);
+ TyposResolved -= DelayedTypos.size();
+ if (Result.isInvalid() || Result.get() != E) {
+ ExprEvalContexts.back().NumTypos -= TyposResolved;
+ return Result;
+ }
+ assert(TyposResolved == 0 && "Corrected typo but got same Expr back?");
+ }
+ return E;
+}
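
A hedged sketch of how a caller might use the filter parameter (isAcceptableHere is an assumed placeholder, not a Clang API): each candidate transform of E is passed through the filter, and returning ExprError() makes the next correction combination be tried.

    // Hypothetical caller inside Sema; isAcceptableHere is an assumed helper.
    ExprResult Res = CorrectDelayedTyposInExpr(E, [&](Expr *Candidate) -> ExprResult {
      if (!isAcceptableHere(Candidate))  // reject this correction combination
        return ExprError();
      return Candidate;                  // accept; Transform() stops retrying
    });
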
ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
bool DiscardedValue,
@@ -5952,6 +6293,10 @@ ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
return ExprError();
}
+ FullExpr = CorrectDelayedTyposInExpr(FullExpr.get());
+ if (FullExpr.isInvalid())
+ return ExprError();
+
CheckCompletedExpr(FullExpr.get(), CC, IsConstexpr);
// At the end of this full expression (which could be a deeply nested
diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp
index ef7898208cae..af1cf9046113 100644
--- a/lib/Sema/SemaExprMember.cpp
+++ b/lib/Sema/SemaExprMember.cpp
@@ -10,7 +10,7 @@
// This file implements semantic analysis for member access expressions.
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Overload.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
@@ -21,6 +21,7 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
using namespace clang;
using namespace sema;
@@ -89,7 +90,6 @@ enum IMAKind {
/// conservatively answer "yes", in which case some errors will simply
/// not be caught until template-instantiation.
static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
- Scope *CurScope,
const LookupResult &R) {
assert(!R.empty() && (*R.begin())->isCXXClassMember());
@@ -204,6 +204,9 @@ static void diagnoseInstanceReference(Sema &SemaRef,
SourceRange Range(Loc);
if (SS.isSet()) Range.setBegin(SS.getRange().getBegin());
+ // Look through using shadow decls and aliases.
+ Rep = Rep->getUnderlyingDecl();
+
DeclContext *FunctionLevelDC = SemaRef.getFunctionLevelDeclContext();
CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FunctionLevelDC);
CXXRecordDecl *ContextClass = Method ? Method->getParent() : nullptr;
@@ -236,7 +239,7 @@ Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs) {
- switch (ClassifyImplicitMemberAccess(*this, CurScope, R)) {
+ switch (ClassifyImplicitMemberAccess(*this, R)) {
case IMA_Instance:
return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, true);
@@ -536,9 +539,17 @@ namespace {
// FunctionTemplateDecl and are declared in the current record or, for a C++
// classes, one of its base classes.
class RecordMemberExprValidatorCCC : public CorrectionCandidateCallback {
- public:
+public:
explicit RecordMemberExprValidatorCCC(const RecordType *RTy)
- : Record(RTy->getDecl()) {}
+ : Record(RTy->getDecl()) {
+ // Don't add bare keywords to the consumer since they will always fail
+ // validation by virtue of not being associated with any decls.
+ WantTypeSpecifiers = false;
+ WantExpressionKeywords = false;
+ WantCXXNamedCasts = false;
+ WantFunctionLikeCasts = false;
+ WantRemainingKeywords = false;
+ }
bool ValidateCandidate(const TypoCorrection &candidate) override {
NamedDecl *ND = candidate.getCorrectionDecl();
@@ -554,8 +565,8 @@ class RecordMemberExprValidatorCCC : public CorrectionCandidateCallback {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Record)) {
// Accept candidates that occur in any of the current class' base classes.
for (const auto &BS : RD->bases()) {
- if (const RecordType *BSTy = dyn_cast_or_null<RecordType>(
- BS.getType().getTypePtrOrNull())) {
+ if (const RecordType *BSTy =
+ dyn_cast_or_null<RecordType>(BS.getType().getTypePtrOrNull())) {
if (BSTy->getDecl()->containsDecl(ND))
return true;
}
@@ -565,17 +576,19 @@ class RecordMemberExprValidatorCCC : public CorrectionCandidateCallback {
return false;
}
- private:
+private:
const RecordDecl *const Record;
};
}
-static bool
-LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
- SourceRange BaseRange, const RecordType *RTy,
- SourceLocation OpLoc, CXXScopeSpec &SS,
- bool HasTemplateArgs) {
+static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
+ Expr *BaseExpr,
+ const RecordType *RTy,
+ SourceLocation OpLoc, bool IsArrow,
+ CXXScopeSpec &SS, bool HasTemplateArgs,
+ TypoExpr *&TE) {
+ SourceRange BaseRange = BaseExpr ? BaseExpr->getSourceRange() : SourceRange();
RecordDecl *RDecl = RTy->getDecl();
if (!SemaRef.isThisOutsideMemberFunctionBody(QualType(RTy, 0)) &&
SemaRef.RequireCompleteType(OpLoc, QualType(RTy, 0),
@@ -600,7 +613,7 @@ LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
if (SemaRef.RequireCompleteDeclContext(SS, DC)) {
SemaRef.Diag(SS.getRange().getEnd(), diag::err_typecheck_incomplete_tag)
- << SS.getRange() << DC;
+ << SS.getRange() << DC;
return true;
}
@@ -608,47 +621,48 @@ LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
if (!isa<TypeDecl>(DC)) {
SemaRef.Diag(R.getNameLoc(), diag::err_qualified_member_nonclass)
- << DC << SS.getRange();
+ << DC << SS.getRange();
return true;
}
}
// The record definition is complete, now look up the member.
- SemaRef.LookupQualifiedName(R, DC);
+ SemaRef.LookupQualifiedName(R, DC, SS);
if (!R.empty())
return false;
- // We didn't find anything with the given name, so try to correct
- // for typos.
- DeclarationName Name = R.getLookupName();
- RecordMemberExprValidatorCCC Validator(RTy);
- TypoCorrection Corrected = SemaRef.CorrectTypo(R.getLookupNameInfo(),
- R.getLookupKind(), nullptr,
- &SS, Validator,
- Sema::CTK_ErrorRecovery, DC);
- R.clear();
- if (Corrected.isResolved() && !Corrected.isKeyword()) {
- R.setLookupName(Corrected.getCorrection());
- for (TypoCorrection::decl_iterator DI = Corrected.begin(),
- DIEnd = Corrected.end();
- DI != DIEnd; ++DI) {
- R.addDecl(*DI);
- }
- R.resolveKind();
-
- // If we're typo-correcting to an overloaded name, we don't yet have enough
- // information to do overload resolution, so we don't know which previous
- // declaration to point to.
- if (Corrected.isOverloaded())
- Corrected.setCorrectionDecl(nullptr);
- bool DroppedSpecifier =
- Corrected.WillReplaceSpecifier() &&
- Name.getAsString() == Corrected.getAsString(SemaRef.getLangOpts());
- SemaRef.diagnoseTypo(Corrected,
- SemaRef.PDiag(diag::err_no_member_suggest)
- << Name << DC << DroppedSpecifier << SS.getRange());
- }
+ DeclarationName Typo = R.getLookupName();
+ SourceLocation TypoLoc = R.getNameLoc();
+ TE = SemaRef.CorrectTypoDelayed(
+ R.getLookupNameInfo(), R.getLookupKind(), nullptr, &SS,
+ llvm::make_unique<RecordMemberExprValidatorCCC>(RTy),
+ [=, &SemaRef](const TypoCorrection &TC) {
+ if (TC) {
+ assert(!TC.isKeyword() &&
+ "Got a keyword as a correction for a member!");
+ bool DroppedSpecifier =
+ TC.WillReplaceSpecifier() &&
+ Typo.getAsString() == TC.getAsString(SemaRef.getLangOpts());
+ SemaRef.diagnoseTypo(TC, SemaRef.PDiag(diag::err_no_member_suggest)
+ << Typo << DC << DroppedSpecifier
+ << SS.getRange());
+ } else {
+ SemaRef.Diag(TypoLoc, diag::err_no_member) << Typo << DC << BaseRange;
+ }
+ },
+ [=](Sema &SemaRef, TypoExpr *TE, TypoCorrection TC) mutable {
+ R.clear(); // Ensure there are no decls lingering in the shared state.
+ R.suppressDiagnostics();
+ R.setLookupName(TC.getCorrection());
+ for (NamedDecl *ND : TC)
+ R.addDecl(ND);
+ R.resolveKind();
+ return SemaRef.BuildMemberReferenceExpr(
+ BaseExpr, BaseExpr->getType(), OpLoc, IsArrow, SS, SourceLocation(),
+ nullptr, R, nullptr);
+ },
+ Sema::CTK_ErrorRecovery, DC);
return false;
}
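
A hypothetical translation unit (not from the patch) of the kind this delayed path targets; it intentionally does not compile, since its purpose is to trigger err_no_member_suggest and the recovery callback that rebuilds the member reference:

    struct Widget {
      void callback(int);
      void callback(double);  // overloaded, so the correction carries several decls
    };
    void use(Widget &w) {
      w.callbck(3);  // typo: expected to be diagnosed with a suggestion of 'callback'
    }

Because the correction can name multiple overloads, the chosen decl is only pinned down later, which is why the recovery lambda re-runs BuildMemberReferenceExpr with the corrected lookup result.
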
@@ -678,12 +692,15 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
// Implicit member accesses.
if (!Base) {
+ TypoExpr *TE = nullptr;
QualType RecordTy = BaseType;
if (IsArrow) RecordTy = RecordTy->getAs<PointerType>()->getPointeeType();
- if (LookupMemberExprInRecord(*this, R, SourceRange(),
- RecordTy->getAs<RecordType>(),
- OpLoc, SS, TemplateArgs != nullptr))
+ if (LookupMemberExprInRecord(*this, R, nullptr,
+ RecordTy->getAs<RecordType>(), OpLoc, IsArrow,
+ SS, TemplateArgs != nullptr, TE))
return ExprError();
+ if (TE)
+ return TE;
// Explicit member accesses.
} else {
@@ -1211,13 +1228,16 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
// Handle field access to simple records.
if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
- if (LookupMemberExprInRecord(S, R, BaseExpr.get()->getSourceRange(),
- RTy, OpLoc, SS, HasTemplateArgs))
+ TypoExpr *TE = nullptr;
+ if (LookupMemberExprInRecord(S, R, BaseExpr.get(), RTy,
+ OpLoc, IsArrow, SS, HasTemplateArgs, TE))
return ExprError();
// Returning valid-but-null is how we indicate to the caller that
- // the lookup result was filled in.
- return ExprResult((Expr *)nullptr);
+ // the lookup result was filled in. If typo correction was attempted and
+ // failed, the lookup result will have been cleared; that, combined with the
+ // valid-but-null ExprResult, will trigger the appropriate diagnostics.
+ return ExprResult(TE);
}
// Handle ivar access to Objective-C objects.
@@ -1262,11 +1282,11 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
if (!IV) {
// Attempt to correct for typos in ivar names.
- DeclFilterCCC<ObjCIvarDecl> Validator;
- Validator.IsObjCIvarLookup = IsArrow;
+ auto Validator = llvm::make_unique<DeclFilterCCC<ObjCIvarDecl>>();
+ Validator->IsObjCIvarLookup = IsArrow;
if (TypoCorrection Corrected = S.CorrectTypo(
R.getLookupNameInfo(), Sema::LookupMemberName, nullptr, nullptr,
- Validator, Sema::CTK_ErrorRecovery, IDecl)) {
+ std::move(Validator), Sema::CTK_ErrorRecovery, IDecl)) {
IV = Corrected.getCorrectionDeclAs<ObjCIvarDecl>();
S.diagnoseTypo(
Corrected,
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
index 500233203c79..9c3b51c623d3 100644
--- a/lib/Sema/SemaExprObjC.cpp
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -944,7 +944,11 @@ ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
return ExprError();
std::string Str;
- Context.getObjCEncodingForType(EncodedType, Str);
+ QualType NotEncodedT;
+ Context.getObjCEncodingForType(EncodedType, Str, nullptr, &NotEncodedT);
+ if (!NotEncodedT.isNull())
+ Diag(AtLoc, diag::warn_incomplete_encoded_type)
+ << EncodedType << NotEncodedT;
// The type of @encode is the same as the type of the corresponding string,
// which is an array type.
@@ -983,7 +987,7 @@ static bool HelperToDiagnoseMismatchedMethodsInGlobalPool(Sema &S,
ObjCMethodList *M = &MethList;
bool Warned = false;
for (M = M->getNext(); M; M=M->getNext()) {
- ObjCMethodDecl *MatchingMethodDecl = M->Method;
+ ObjCMethodDecl *MatchingMethodDecl = M->getMethod();
if (MatchingMethodDecl == Method ||
isa<ObjCImplDecl>(MatchingMethodDecl->getDeclContext()) ||
MatchingMethodDecl->getSelector() != Method->getSelector())
@@ -1086,6 +1090,7 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
case OMF_mutableCopy:
case OMF_new:
case OMF_self:
+ case OMF_initialize:
case OMF_performSelector:
break;
}
@@ -1105,6 +1110,8 @@ ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
Diag(ProtoLoc, diag::err_undeclared_protocol) << ProtocolId;
return true;
}
+ if (PDecl->hasDefinition())
+ PDecl = PDecl->getDefinition();
QualType Ty = Context.getObjCProtoType();
if (Ty.isNull())
@@ -1222,12 +1229,8 @@ void Sema::EmitRelatedResultTypeNoteForReturn(QualType destType) {
// 'instancetype'.
if (const ObjCMethodDecl *overridden =
findExplicitInstancetypeDeclarer(MD, Context.getObjCInstanceType())) {
- SourceLocation loc;
- SourceRange range;
- if (TypeSourceInfo *TSI = overridden->getReturnTypeSourceInfo()) {
- range = TSI->getTypeLoc().getSourceRange();
- loc = range.getBegin();
- }
+ SourceRange range = overridden->getReturnTypeSourceRange();
+ SourceLocation loc = range.getBegin();
if (loc.isInvalid())
loc = overridden->getLocation();
Diag(loc, diag::note_related_result_type_explicit)
@@ -1276,6 +1279,7 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
+ SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK) {
SourceLocation SelLoc;
if (!SelectorLocs.empty() && SelectorLocs.front().isValid())
@@ -1317,9 +1321,12 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
: diag::warn_instance_method_not_found_with_typo;
Selector MatchedSel = OMD->getSelector();
SourceRange SelectorRange(SelectorLocs.front(), SelectorLocs.back());
- Diag(SelLoc, DiagID)
- << Sel<< isClassMessage << MatchedSel
- << FixItHint::CreateReplacement(SelectorRange, MatchedSel.getAsString());
+ if (MatchedSel.isUnarySelector())
+ Diag(SelLoc, DiagID)
+ << Sel<< isClassMessage << MatchedSel
+ << FixItHint::CreateReplacement(SelectorRange, MatchedSel.getAsString());
+ else
+ Diag(SelLoc, DiagID) << Sel<< isClassMessage << MatchedSel;
}
else
Diag(SelLoc, DiagID)
@@ -1327,9 +1334,15 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
SelectorLocs.back());
// Find the class to which we are sending this message.
if (ReceiverType->isObjCObjectPointerType()) {
- if (ObjCInterfaceDecl *Class =
- ReceiverType->getAs<ObjCObjectPointerType>()->getInterfaceDecl())
- Diag(Class->getLocation(), diag::note_receiver_class_declared);
+ if (ObjCInterfaceDecl *ThisClass =
+ ReceiverType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()) {
+ Diag(ThisClass->getLocation(), diag::note_receiver_class_declared);
+ if (!RecRange.isInvalid())
+ if (ThisClass->lookupClassMethod(Sel))
+ Diag(RecRange.getBegin(),diag::note_receiver_expr_here)
+ << FixItHint::CreateReplacement(RecRange,
+ ThisClass->getNameAsString());
+ }
}
}
@@ -1400,7 +1413,7 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
param);
- ExprResult ArgE = PerformCopyInitialization(Entity, SelLoc, argExpr);
+ ExprResult ArgE = PerformCopyInitialization(Entity, SourceLocation(), argExpr);
if (ArgE.isInvalid())
IsError = true;
else
@@ -1434,7 +1447,7 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
// Do additional checkings on method.
IsError |= CheckObjCMethodCall(
- Method, SelLoc, makeArrayRef<const Expr *>(Args.data(), Args.size()));
+ Method, SelLoc, makeArrayRef(Args.data(), Args.size()));
return IsError;
}
@@ -1651,6 +1664,22 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
return ExprError();
+ // Special warning if the member name used in property-dot syntax for a setter
+ // accessor does not match the property's declared name; e.g. obj.X = ... for a
+ // property named 'x'.
+ if (Setter && Setter->isImplicit() && Setter->isPropertyAccessor()
+ && !IFace->FindPropertyDeclaration(Member)) {
+ if (const ObjCPropertyDecl *PDecl = Setter->findPropertyDecl()) {
+ // Do not warn if the user is using property-dot syntax to make a call to a
+ // user-named setter.
+ if (!(PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter))
+ Diag(MemberLoc,
+ diag::warn_property_access_suggest)
+ << MemberName << QualType(OPT, 0) << PDecl->getName()
+ << FixItHint::CreateReplacement(MemberLoc, PDecl->getName());
+ }
+ }
+
if (Getter || Setter) {
if (Super)
return new (Context)
@@ -1664,10 +1693,11 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
}
// Attempt to correct for typos in property names.
- DeclFilterCCC<ObjCPropertyDecl> Validator;
- if (TypoCorrection Corrected = CorrectTypo(
- DeclarationNameInfo(MemberName, MemberLoc), LookupOrdinaryName,
- nullptr, nullptr, Validator, CTK_ErrorRecovery, IFace, false, OPT)) {
+ if (TypoCorrection Corrected =
+ CorrectTypo(DeclarationNameInfo(MemberName, MemberLoc),
+ LookupOrdinaryName, nullptr, nullptr,
+ llvm::make_unique<DeclFilterCCC<ObjCPropertyDecl>>(),
+ CTK_ErrorRecovery, IFace, false, OPT)) {
diagnoseTypo(Corrected, PDiag(diag::err_property_not_found_suggest)
<< MemberName << QualType(OPT, 0));
DeclarationName TypoResult = Corrected.getCorrection();
@@ -1892,11 +1922,10 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
}
}
- ObjCInterfaceOrSuperCCC Validator(getCurMethodDecl());
- if (TypoCorrection Corrected =
- CorrectTypo(Result.getLookupNameInfo(), Result.getLookupKind(), S,
- nullptr, Validator, CTK_ErrorRecovery, nullptr, false,
- nullptr, false)) {
+ if (TypoCorrection Corrected = CorrectTypo(
+ Result.getLookupNameInfo(), Result.getLookupKind(), S, nullptr,
+ llvm::make_unique<ObjCInterfaceOrSuperCCC>(getCurMethodDecl()),
+ CTK_ErrorRecovery, nullptr, false, nullptr, false)) {
if (Corrected.isKeyword()) {
// If we've found the keyword "super" (the only keyword that would be
// returned by CorrectTypo), this is a send to super.
@@ -2034,6 +2063,45 @@ static void checkCocoaAPI(Sema &S, const ObjCMessageExpr *Msg) {
edit::rewriteObjCRedundantCallWithLiteral);
}
+/// \brief Diagnose use of the %s directive in an NSString which is being passed
+/// as a format string to a formatting method.
+static void
+DiagnoseCStringFormatDirectiveInObjCAPI(Sema &S,
+ ObjCMethodDecl *Method,
+ Selector Sel,
+ Expr **Args, unsigned NumArgs) {
+ unsigned Idx = 0;
+ bool Format = false;
+ ObjCStringFormatFamily SFFamily = Sel.getStringFormatFamily();
+ if (SFFamily == ObjCStringFormatFamily::SFF_NSString) {
+ Idx = 0;
+ Format = true;
+ }
+ else if (Method) {
+ for (const auto *I : Method->specific_attrs<FormatAttr>()) {
+ if (S.GetFormatNSStringIdx(I, Idx)) {
+ Format = true;
+ break;
+ }
+ }
+ }
+ if (!Format || NumArgs <= Idx)
+ return;
+
+ Expr *FormatExpr = Args[Idx];
+ if (ObjCStringLiteral *OSL =
+ dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) {
+ StringLiteral *FormatString = OSL->getString();
+ if (S.FormatStringHasSArg(FormatString)) {
+ S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
+ << "%s" << 0 << 0;
+ if (Method)
+ S.Diag(Method->getLocation(), diag::note_method_declared_at)
+ << Method->getDeclName();
+ }
+ }
+}
+
/// \brief Build an Objective-C class message expression.
///
/// This routine takes care of both normal class messages and
@@ -2146,7 +2214,8 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
if (CheckMessageArgumentTypes(ReceiverType, MultiExprArg(Args, NumArgs),
Sel, SelectorLocs,
Method, true,
- SuperLoc.isValid(), LBracLoc, RBracLoc,
+ SuperLoc.isValid(), LBracLoc, RBracLoc,
+ SourceRange(),
ReturnType, VK))
return ExprError();
@@ -2154,7 +2223,32 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
RequireCompleteType(LBracLoc, Method->getReturnType(),
diag::err_illegal_message_expr_incomplete_type))
return ExprError();
-
+
+ // Warn about an explicit call of +initialize on its own class, but not on 'super'.
+ if (Method && Method->getMethodFamily() == OMF_initialize) {
+ if (!SuperLoc.isValid()) {
+ const ObjCInterfaceDecl *ID =
+ dyn_cast<ObjCInterfaceDecl>(Method->getDeclContext());
+ if (ID == Class) {
+ Diag(Loc, diag::warn_direct_initialize_call);
+ Diag(Method->getLocation(), diag::note_method_declared_at)
+ << Method->getDeclName();
+ }
+ }
+ else if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
+ // [super initialize] is allowed only within an +initialize implementation
+ if (CurMeth->getMethodFamily() != OMF_initialize) {
+ Diag(Loc, diag::warn_direct_super_initialize_call);
+ Diag(Method->getLocation(), diag::note_method_declared_at)
+ << Method->getDeclName();
+ Diag(CurMeth->getLocation(), diag::note_method_declared_at)
+ << CurMeth->getDeclName();
+ }
+ }
+ }
+
+ DiagnoseCStringFormatDirectiveInObjCAPI(*this, Method, Sel, Args, NumArgs);
+
// Construct the appropriate ObjCMessageExpr.
ObjCMessageExpr *Result;
if (SuperLoc.isValid())
@@ -2354,11 +2448,19 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
Method = LookupFactoryMethodInGlobalPool(Sel,
SourceRange(LBracLoc,RBracLoc),
receiverIsId);
+ if (Method) {
+ if (ObjCMethodDecl *BestMethod =
+ SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod()))
+ Method = BestMethod;
+ if (!AreMultipleMethodsInGlobalPool(Sel, Method->isInstanceMethod()))
+ DiagnoseUseOfDecl(Method, SelLoc);
+ }
} else if (ReceiverType->isObjCClassType() ||
ReceiverType->isObjCQualifiedClassType()) {
// Handle messages to Class.
- // We allow sending a message to a qualified Class ("Class<foo>"), which
- // is ok as long as one of the protocols implements the selector (if not, warn).
+ // We allow sending a message to a qualified Class ("Class<foo>"), which
+ // is ok as long as one of the protocols implements the selector (if not,
+ // warn).
if (const ObjCObjectPointerType *QClassTy
= ReceiverType->getAsObjCQualifiedClassType()) {
// Search protocols for class methods.
@@ -2405,6 +2507,10 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
<< Sel << SourceRange(LBracLoc, RBracLoc);
}
}
+ if (Method)
+ if (ObjCMethodDecl *BestMethod =
+ SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod()))
+ Method = BestMethod;
}
}
}
@@ -2543,7 +2649,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (CheckMessageArgumentTypes(ReceiverType, MultiExprArg(Args, NumArgs),
Sel, SelectorLocs, Method,
ClassMessage, SuperLoc.isValid(),
- LBracLoc, RBracLoc, ReturnType, VK))
+ LBracLoc, RBracLoc, RecRange, ReturnType, VK))
return ExprError();
if (Method && !Method->getReturnType()->isVoidType() &&
@@ -2568,6 +2674,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
case OMF_mutableCopy:
case OMF_new:
case OMF_self:
+ case OMF_initialize:
break;
case OMF_dealloc:
@@ -2630,6 +2737,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
}
+ DiagnoseCStringFormatDirectiveInObjCAPI(*this, Method, Sel, Args, NumArgs);
+
// Construct the appropriate ObjCMessageExpr instance.
ObjCMessageExpr *Result;
if (SuperLoc.isValid())
@@ -3296,6 +3405,9 @@ static bool CheckObjCBridgeNSCast(Sema &S, QualType castType, Expr *castExpr,
if (TB *ObjCBAttr = getObjCBridgeAttr<TB>(TD)) {
if (IdentifierInfo *Parm = ObjCBAttr->getBridgedType()) {
HadTheAttribute = true;
+ if (Parm->isStr("id"))
+ return true;
+
NamedDecl *Target = nullptr;
// Check for an existing type with this name.
LookupResult R(S, DeclarationName(Parm), SourceLocation(),
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index 06ca9aed6a87..569ef307474f 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -466,11 +466,15 @@ void InitListChecker::FillInEmptyInitForField(unsigned Init, FieldDecl *Field,
// members in the aggregate, then each member not explicitly initialized
// shall be initialized from its brace-or-equal-initializer [...]
if (Field->hasInClassInitializer()) {
- Expr *DIE = CXXDefaultInitExpr::Create(SemaRef.Context, Loc, Field);
+ ExprResult DIE = SemaRef.BuildCXXDefaultInitExpr(Loc, Field);
+ if (DIE.isInvalid()) {
+ hadError = true;
+ return;
+ }
if (Init < NumInits)
- ILE->setInit(Init, DIE);
+ ILE->setInit(Init, DIE.get());
else {
- ILE->updateInit(SemaRef.Context, Init, DIE);
+ ILE->updateInit(SemaRef.Context, Init, DIE.get());
RequiresSecondPass = true;
}
return;
@@ -1555,10 +1559,11 @@ void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity,
}
}
- // Value-initialize the first named member of the union.
+ // Value-initialize the first member of the union that isn't an unnamed
+ // bitfield.
for (RecordDecl::field_iterator FieldEnd = RD->field_end();
Field != FieldEnd; ++Field) {
- if (Field->getDeclName()) {
+ if (!Field->isUnnamedBitfield()) {
if (VerifyOnly)
CheckEmptyInitializable(
InitializedEntity::InitializeMember(*Field, &Entity),
@@ -1734,24 +1739,6 @@ static void ExpandAnonymousFieldDesignator(Sema &SemaRef,
&Replacements[0] + Replacements.size());
}
-/// \brief Given an implicit anonymous field, search the IndirectField that
-/// corresponds to FieldName.
-static IndirectFieldDecl *FindIndirectFieldDesignator(FieldDecl *AnonField,
- IdentifierInfo *FieldName) {
- if (!FieldName)
- return nullptr;
-
- assert(AnonField->isAnonymousStructOrUnion());
- Decl *NextDecl = AnonField->getNextDeclInContext();
- while (IndirectFieldDecl *IF =
- dyn_cast_or_null<IndirectFieldDecl>(NextDecl)) {
- if (FieldName == IF->getAnonField()->getIdentifier())
- return IF;
- NextDecl = NextDecl->getNextDeclInContext();
- }
- return nullptr;
-}
-
static DesignatedInitExpr *CloneDesignatedInitExpr(Sema &SemaRef,
DesignatedInitExpr *DIE) {
unsigned NumIndexExprs = DIE->getNumSubExprs() - 1;
@@ -1892,103 +1879,76 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
return true;
}
- // Note: we perform a linear search of the fields here, despite
- // the fact that we have a faster lookup method, because we always
- // need to compute the field's index.
FieldDecl *KnownField = D->getField();
- IdentifierInfo *FieldName = D->getFieldName();
- unsigned FieldIndex = 0;
- RecordDecl::field_iterator
- Field = RT->getDecl()->field_begin(),
- FieldEnd = RT->getDecl()->field_end();
- for (; Field != FieldEnd; ++Field) {
- if (Field->isUnnamedBitfield())
- continue;
-
- // If we find a field representing an anonymous field, look in the
- // IndirectFieldDecl that follow for the designated initializer.
- if (!KnownField && Field->isAnonymousStructOrUnion()) {
- if (IndirectFieldDecl *IF =
- FindIndirectFieldDesignator(*Field, FieldName)) {
+ if (!KnownField) {
+ IdentifierInfo *FieldName = D->getFieldName();
+ DeclContext::lookup_result Lookup = RT->getDecl()->lookup(FieldName);
+ for (NamedDecl *ND : Lookup) {
+ if (auto *FD = dyn_cast<FieldDecl>(ND)) {
+ KnownField = FD;
+ break;
+ }
+ if (auto *IFD = dyn_cast<IndirectFieldDecl>(ND)) {
// In verify mode, don't modify the original.
if (VerifyOnly)
DIE = CloneDesignatedInitExpr(SemaRef, DIE);
- ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx, IF);
+ ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx, IFD);
D = DIE->getDesignator(DesigIdx);
+ KnownField = cast<FieldDecl>(*IFD->chain_begin());
break;
}
}
- if (KnownField && KnownField == *Field)
- break;
- if (FieldName && FieldName == Field->getIdentifier())
- break;
-
- ++FieldIndex;
- }
+ if (!KnownField) {
+ if (VerifyOnly) {
+ ++Index;
+ return true; // No typo correction when just trying this out.
+ }
- if (Field == FieldEnd) {
- if (VerifyOnly) {
- ++Index;
- return true; // No typo correction when just trying this out.
- }
+ // Name lookup found something, but it wasn't a field.
+ if (!Lookup.empty()) {
+ SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_nonfield)
+ << FieldName;
+ SemaRef.Diag(Lookup.front()->getLocation(),
+ diag::note_field_designator_found);
+ ++Index;
+ return true;
+ }
- // There was no normal field in the struct with the designated
- // name. Perform another lookup for this name, which may find
- // something that we can't designate (e.g., a member function),
- // may find nothing, or may find a member of an anonymous
- // struct/union.
- DeclContext::lookup_result Lookup = RT->getDecl()->lookup(FieldName);
- FieldDecl *ReplacementField = nullptr;
- if (Lookup.empty()) {
- // Name lookup didn't find anything. Determine whether this
- // was a typo for another field name.
- FieldInitializerValidatorCCC Validator(RT->getDecl());
+ // Name lookup didn't find anything.
+ // Determine whether this was a typo for another field name.
if (TypoCorrection Corrected = SemaRef.CorrectTypo(
DeclarationNameInfo(FieldName, D->getFieldLoc()),
- Sema::LookupMemberName, /*Scope=*/ nullptr, /*SS=*/ nullptr,
- Validator, Sema::CTK_ErrorRecovery, RT->getDecl())) {
+ Sema::LookupMemberName, /*Scope=*/nullptr, /*SS=*/nullptr,
+ llvm::make_unique<FieldInitializerValidatorCCC>(RT->getDecl()),
+ Sema::CTK_ErrorRecovery, RT->getDecl())) {
SemaRef.diagnoseTypo(
Corrected,
SemaRef.PDiag(diag::err_field_designator_unknown_suggest)
- << FieldName << CurrentObjectType);
- ReplacementField = Corrected.getCorrectionDeclAs<FieldDecl>();
+ << FieldName << CurrentObjectType);
+ KnownField = Corrected.getCorrectionDeclAs<FieldDecl>();
hadError = true;
} else {
+ // Typo correction didn't find anything.
SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_unknown)
<< FieldName << CurrentObjectType;
++Index;
return true;
}
}
+ }
- if (!ReplacementField) {
- // Name lookup found something, but it wasn't a field.
- SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_nonfield)
- << FieldName;
- SemaRef.Diag(Lookup.front()->getLocation(),
- diag::note_field_designator_found);
- ++Index;
- return true;
- }
-
- if (!KnownField) {
- // The replacement field comes from typo correction; find it
- // in the list of fields.
- FieldIndex = 0;
- Field = RT->getDecl()->field_begin();
- for (; Field != FieldEnd; ++Field) {
- if (Field->isUnnamedBitfield())
- continue;
-
- if (ReplacementField == *Field ||
- Field->getIdentifier() == ReplacementField->getIdentifier())
- break;
-
- ++FieldIndex;
- }
- }
+ unsigned FieldIndex = 0;
+ for (auto *FI : RT->getDecl()->fields()) {
+ if (FI->isUnnamedBitfield())
+ continue;
+ if (KnownField == FI)
+ break;
+ ++FieldIndex;
}
+ RecordDecl::field_iterator Field =
+ RecordDecl::field_iterator(DeclContext::decl_iterator(KnownField));
+
// All of the fields of a union are located at the same place in
// the initializer list.
if (RT->getDecl()->isUnion()) {
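
A hypothetical initializer (not from the patch) illustrating what the lookup-based designator resolution above has to handle: the designated field is reachable only through an IndirectFieldDecl injected by an anonymous union, so it is not one of the record's own FieldDecls. Compiled as C, or as C++ with Clang's designated-initializer extension:

    struct S {
      int a;
      union {        // anonymous union: 'b' and 'c' become indirect fields of S
        int b;
        float c;
      };
    };
    struct S s = { .b = 1 };  // designator expands through the anonymous member
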
@@ -5537,18 +5497,18 @@ static void performLifetimeExtension(Expr *Init,
static bool
performReferenceExtension(Expr *Init,
const InitializedEntity *ExtendingEntity) {
- if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
- if (ILE->getNumInits() == 1 && ILE->isGLValue()) {
- // This is just redundant braces around an initializer. Step over it.
- Init = ILE->getInit(0);
- }
- }
-
// Walk past any constructs which we can lifetime-extend across.
Expr *Old;
do {
Old = Init;
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ if (ILE->getNumInits() == 1 && ILE->isGLValue()) {
+ // This is just redundant braces around an initializer. Step over it.
+ Init = ILE->getInit(0);
+ }
+ }
+
// Step over any subobject adjustments; we may have a materialized
// temporary inside them.
SmallVector<const Expr *, 2> CommaLHSs;
@@ -6476,12 +6436,45 @@ static void diagnoseListInit(Sema &S, const InitializedEntity &Entity,
return diagnoseListInit(S, HiddenArray, InitList);
}
+ if (DestType->isReferenceType()) {
+ // A list-initialization failure for a reference means that we tried to
+ // create a temporary of the inner type (per [dcl.init.list]p3.6) and the
+ // inner initialization failed.
+ QualType T = DestType->getAs<ReferenceType>()->getPointeeType();
+ diagnoseListInit(S, InitializedEntity::InitializeTemporary(T), InitList);
+ SourceLocation Loc = InitList->getLocStart();
+ if (auto *D = Entity.getDecl())
+ Loc = D->getLocation();
+ S.Diag(Loc, diag::note_in_reference_temporary_list_initializer) << T;
+ return;
+ }
+
InitListChecker DiagnoseInitList(S, Entity, InitList, DestType,
/*VerifyOnly=*/false);
assert(DiagnoseInitList.HadError() &&
"Inconsistent init list check result.");
}
+/// Prints a fixit for adding a zero initializer for \p Entity. Call this only
+/// right after emitting a diagnostic.
+static void maybeEmitZeroInitializationFixit(Sema &S,
+ InitializationSequence &Sequence,
+ const InitializedEntity &Entity) {
+ if (Entity.getKind() != InitializedEntity::EK_Variable)
+ return;
+
+ VarDecl *VD = cast<VarDecl>(Entity.getDecl());
+ if (VD->getInit() || VD->getLocEnd().isMacroID())
+ return;
+
+ QualType VariableTy = VD->getType().getCanonicalType();
+ SourceLocation Loc = S.getLocForEndOfToken(VD->getLocEnd());
+ std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc);
+
+ S.Diag(Loc, diag::note_add_initializer)
+ << VD << FixItHint::CreateInsertion(Loc, Init);
+}
+
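
A hypothetical snippet (not from the patch) that exercises the new fixit: default-initializing a const scalar is rejected with err_default_init_const, and the note now suggests inserting an explicit initializer.

    void f() {
      const int n;  // error: default initialization of a const variable;
                    // the added note proposes inserting "= 0" after 'n'
    }
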
bool InitializationSequence::Diagnose(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -6812,7 +6805,8 @@ bool InitializationSequence::Diagnose(Sema &S,
<< Entity.getName();
} else {
S.Diag(Kind.getLocation(), diag::err_default_init_const)
- << DestType << (bool)DestType->getAs<RecordType>();
+ << DestType << (bool)DestType->getAs<RecordType>();
+ maybeEmitZeroInitializationFixit(S, *this, Entity);
}
break;
diff --git a/lib/Sema/SemaLambda.cpp b/lib/Sema/SemaLambda.cpp
index 0cf4ed7c6081..90a81f4ec452 100644
--- a/lib/Sema/SemaLambda.cpp
+++ b/lib/Sema/SemaLambda.cpp
@@ -617,7 +617,7 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
// If it was ever a placeholder, it had to been deduced to DependentTy.
assert(CSI.ReturnType.isNull() || !CSI.ReturnType->isUndeducedType());
- // C++ Core Issue #975, proposed resolution:
+ // C++ core issue 975:
// If a lambda-expression does not include a trailing-return-type,
// it is as if the trailing-return-type denotes the following type:
// - if there are no return statements in the compound-statement,
@@ -631,6 +631,10 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
// same, that common type;
// - otherwise, the program is ill-formed.
//
+ // C++ core issue 1048 additionally removes top-level cv-qualifiers
+ // from the types of returned expressions to match the C++14 auto
+ // deduction rules.
+ //
// In addition, in blocks in non-C++ modes, if all of the return
// statements are enumerator-like expressions of some type T, where
// T has a name for linkage, then we infer the return type of the
@@ -679,7 +683,8 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
const ReturnStmt *RS = *I;
const Expr *RetE = RS->getRetValue();
- QualType ReturnType = (RetE ? RetE->getType() : Context.VoidTy);
+ QualType ReturnType =
+ (RetE ? RetE->getType() : Context.VoidTy).getUnqualifiedType();
if (Context.hasSameType(ReturnType, CSI.ReturnType))
continue;
@@ -873,7 +878,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// We don't do this before C++1y, because we don't support deduced return
// types there.
QualType DefaultTypeForNoTrailingReturn =
- getLangOpts().CPlusPlus1y ? Context.getAutoDeductType()
+ getLangOpts().CPlusPlus14 ? Context.getAutoDeductType()
: Context.DependentTy;
QualType MethodTy =
Context.getFunctionType(DefaultTypeForNoTrailingReturn, None, EPI);
@@ -999,7 +1004,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
VarDecl *Var = nullptr;
if (C->Init.isUsable()) {
- Diag(C->Loc, getLangOpts().CPlusPlus1y
+ Diag(C->Loc, getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_init_capture
: diag::ext_init_capture);
@@ -1049,8 +1054,8 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
if (R.empty()) {
// FIXME: Disable corrections that would add qualification?
CXXScopeSpec ScopeSpec;
- DeclFilterCCC<VarDecl> Validator;
- if (DiagnoseEmptyLookup(CurScope, ScopeSpec, R, Validator))
+ if (DiagnoseEmptyLookup(CurScope, ScopeSpec, R,
+ llvm::make_unique<DeclFilterCCC<VarDecl>>()))
continue;
}
@@ -1062,7 +1067,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// C++11 [expr.prim.lambda]p8:
// An identifier or this shall not appear more than once in a
// lambda-capture.
- if (!CaptureNames.insert(C->Id)) {
+ if (!CaptureNames.insert(C->Id).second) {
if (Var && LSI->isCaptured(Var)) {
Diag(C->Loc, diag::err_capture_more_than_once)
<< C->Id << SourceRange(LSI->getCapture(Var).getLocation())
@@ -1414,6 +1419,12 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
/*isImplicit=*/true));
continue;
}
+ if (From.isVLATypeCapture()) {
+ Captures.push_back(
+ LambdaCapture(From.getLocation(), IsImplicit, LCK_VLAType));
+ CaptureInits.push_back(nullptr);
+ continue;
+ }
VarDecl *Var = From.getVariable();
LambdaCaptureKind Kind = From.isCopyCapture()? LCK_ByCopy : LCK_ByRef;
@@ -1451,7 +1462,7 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
// different machinery.
// FIXME: Refactor and Merge the return type deduction machinery.
// FIXME: Assumes current resolution to core issue 975.
- if (LSI->HasImplicitReturnType && !getLangOpts().CPlusPlus1y) {
+ if (LSI->HasImplicitReturnType && !getLangOpts().CPlusPlus14) {
deduceClosureReturnType(*LSI);
// - if there are no return statements in the
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp
index fe2c8161b871..3445264461f7 100644
--- a/lib/Sema/SemaLookup.cpp
+++ b/lib/Sema/SemaLookup.cpp
@@ -128,7 +128,7 @@ namespace {
// that contexts be visited from the inside out in order to get
// the effective DCs right.
void visit(DeclContext *DC, DeclContext *EffectiveDC) {
- if (!visited.insert(DC))
+ if (!visited.insert(DC).second)
return;
addUsingDirectives(DC, EffectiveDC);
@@ -139,7 +139,7 @@ namespace {
// were declared in the effective DC.
void visit(UsingDirectiveDecl *UD, DeclContext *EffectiveDC) {
DeclContext *NS = UD->getNominatedNamespace();
- if (!visited.insert(NS))
+ if (!visited.insert(NS).second)
return;
addUsingDirective(UD, EffectiveDC);
@@ -154,7 +154,7 @@ namespace {
while (true) {
for (auto UD : DC->using_directives()) {
DeclContext *NS = UD->getNominatedNamespace();
- if (visited.insert(NS)) {
+ if (visited.insert(NS).second) {
addUsingDirective(UD, EffectiveDC);
queue.push_back(NS);
}
@@ -285,7 +285,7 @@ static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
}
void LookupResult::configure() {
- IDNS = getIDNS(LookupKind, SemaRef.getLangOpts().CPlusPlus,
+ IDNS = getIDNS(LookupKind, getSema().getLangOpts().CPlusPlus,
isForRedeclaration());
// If we're looking for one of the allocation or deallocation
@@ -296,7 +296,7 @@ void LookupResult::configure() {
case OO_Delete:
case OO_Array_New:
case OO_Array_Delete:
- SemaRef.DeclareGlobalNewDelete();
+ getSema().DeclareGlobalNewDelete();
break;
default:
@@ -307,7 +307,7 @@ void LookupResult::configure() {
// up being declared.
if (IdentifierInfo *Id = NameInfo.getName().getAsIdentifierInfo()) {
if (unsigned BuiltinID = Id->getBuiltinID()) {
- if (!SemaRef.Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ if (!getSema().Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
AllowHidden = true;
}
}
@@ -400,8 +400,8 @@ void LookupResult::resolveKind() {
// canonical type.
if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
if (!TD->getDeclContext()->isRecord()) {
- QualType T = SemaRef.Context.getTypeDeclType(TD);
- if (!UniqueTypes.insert(SemaRef.Context.getCanonicalType(T))) {
+ QualType T = getSema().Context.getTypeDeclType(TD);
+ if (!UniqueTypes.insert(getSema().Context.getCanonicalType(T)).second) {
// The type is not unique; pull something off the back and continue
// at this index.
Decls[I] = Decls[--N];
@@ -410,7 +410,7 @@ void LookupResult::resolveKind() {
}
}
- if (!Unique.insert(D)) {
+ if (!Unique.insert(D).second) {
// If it's not unique, pull something off the back (and
// continue at this index).
Decls[I] = Decls[--N];
@@ -735,8 +735,7 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
// FIXME: Calling convention!
FunctionProtoType::ExtProtoInfo EPI = ConvProto->getExtProtoInfo();
EPI.ExtInfo = EPI.ExtInfo.withCallingConv(CC_C);
- EPI.ExceptionSpecType = EST_None;
- EPI.NumExceptions = 0;
+ EPI.ExceptionSpec = EST_None;
QualType ExpectedType
= R.getSema().Context.getFunctionType(R.getLookupName().getCXXNameType(),
None, EPI);
@@ -1176,21 +1175,8 @@ static Module *getDefiningModule(Decl *Entity) {
if (FunctionDecl *Pattern = FD->getTemplateInstantiationPattern())
Entity = Pattern;
} else if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Entity)) {
- // If it's a class template specialization, find the template or partial
- // specialization from which it was instantiated.
- if (ClassTemplateSpecializationDecl *SpecRD =
- dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
- llvm::PointerUnion<ClassTemplateDecl*,
- ClassTemplatePartialSpecializationDecl*> From =
- SpecRD->getInstantiatedFrom();
- if (ClassTemplateDecl *FromTemplate = From.dyn_cast<ClassTemplateDecl*>())
- Entity = FromTemplate->getTemplatedDecl();
- else if (From)
- Entity = From.get<ClassTemplatePartialSpecializationDecl*>();
- // Otherwise, it's an explicit specialization.
- } else if (MemberSpecializationInfo *MSInfo =
- RD->getMemberSpecializationInfo())
- Entity = getInstantiatedFrom(RD, MSInfo);
+ if (CXXRecordDecl *Pattern = RD->getTemplateInstantiationPattern())
+ Entity = Pattern;
} else if (EnumDecl *ED = dyn_cast<EnumDecl>(Entity)) {
if (MemberSpecializationInfo *MSInfo = ED->getMemberSpecializationInfo())
Entity = getInstantiatedFrom(ED, MSInfo);
@@ -1279,7 +1265,7 @@ static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D) {
}
NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const {
- return findAcceptableDecl(SemaRef, D);
+ return findAcceptableDecl(getSema(), D);
}
/// @brief Perform unqualified name lookup starting from a given
@@ -1466,7 +1452,7 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
// with its using-children.
for (auto *I : UsingDirectives) {
NamespaceDecl *ND = I->getNominatedNamespace()->getOriginalNamespace();
- if (Visited.insert(ND))
+ if (Visited.insert(ND).second)
Queue.push_back(ND);
}
@@ -1514,7 +1500,7 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
for (auto I : ND->using_directives()) {
NamespaceDecl *Nom = I->getNominatedNamespace();
- if (Visited.insert(Nom))
+ if (Visited.insert(Nom).second)
Queue.push_back(Nom);
}
}
@@ -1776,6 +1762,31 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
return true;
}
+/// \brief Performs qualified name lookup or a special type of lookup for the
+/// "__super::" scope specifier.
+///
+/// This routine is a convenience overload meant to be called from contexts
+/// that need to perform a qualified name lookup with an optional C++ scope
+/// specifier that might require a special kind of lookup.
+///
+/// \param R captures both the lookup criteria and any lookup results found.
+///
+/// \param LookupCtx The context in which qualified name lookup will
+/// search.
+///
+/// \param SS An optional C++ scope-specifier.
+///
+/// \returns true if lookup succeeded, false if it failed.
+bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
+ CXXScopeSpec &SS) {
+ auto *NNS = SS.getScopeRep();
+ if (NNS && NNS->getKind() == NestedNameSpecifier::Super)
+ return LookupInSuper(R, NNS->getAsRecordDecl());
+ return LookupQualifiedName(R, LookupCtx);
+}
+
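
A hypothetical example (not part of the patch, and assuming Clang's Microsoft extensions, e.g. -fms-extensions) of the "__super::" spelling that this overload routes to LookupInSuper:

    struct Base {
      void report();
    };
    struct Derived : Base {
      void report() { __super::report(); }  // name lookup searches Base's members
    };
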
/// @brief Performs name lookup for a name that was parsed in the
/// source code, and may contain a C++ scope specifier.
///
@@ -1783,7 +1794,8 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
/// contexts that receive a name and an optional C++ scope specifier
/// (e.g., "N::M::x"). It will then perform either qualified or
/// unqualified name lookup (with LookupQualifiedName or LookupName,
-/// respectively) on the given name and return those results.
+/// respectively) on the given name and return those results. It will
+/// perform a special type of lookup for the "__super::" scope specifier.
///
/// @param S The scope from which unqualified name lookup will
/// begin.
@@ -1803,6 +1815,10 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
}
if (SS && SS->isSet()) {
+ NestedNameSpecifier *NNS = SS->getScopeRep();
+ if (NNS->getKind() == NestedNameSpecifier::Super)
+ return LookupInSuper(R, NNS->getAsRecordDecl());
+
if (DeclContext *DC = computeDeclContext(*SS, EnteringContext)) {
// We have resolved the scope specifier to a particular declaration
// context, and will perform name lookup in that context.
@@ -1825,6 +1841,30 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
return LookupName(R, S, AllowBuiltinCreation);
}
+/// \brief Perform qualified name lookup into all base classes of the given
+/// class.
+///
+/// \param R captures both the lookup criteria and any lookup results found.
+///
+/// \param Class The context in which qualified name lookup will
+/// search. Name lookup will search in all base classes merging the results.
+///
+/// \returns true if any decls were found (but possibly ambiguous).
+bool Sema::LookupInSuper(LookupResult &R, CXXRecordDecl *Class) {
+ for (const auto &BaseSpec : Class->bases()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(
+ BaseSpec.getType()->castAs<RecordType>()->getDecl());
+ LookupResult Result(*this, R.getLookupNameInfo(), R.getLookupKind());
+ Result.setBaseObjectType(Context.getRecordType(Class));
+ LookupQualifiedName(Result, RD);
+ for (auto *Decl : Result)
+ R.addDecl(Decl);
+ }
+
+ R.resolveKind();
+
+ return !R.empty();
+}
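
Since LookupInSuper merges declarations from every direct base before calling resolveKind(), a name declared in more than one base is expected to be reported as ambiguous. A hypothetical case (again assuming Microsoft extensions for __super):

    struct A { static const int value = 1; };
    struct B { static const int value = 2; };
    struct C : A, B {
      int get() { return __super::value; }  // ambiguous: A::value vs. B::value
    };
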
/// \brief Produce a diagnostic describing the ambiguity that resulted
/// from name lookup.
@@ -2024,7 +2064,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
// FIXME: That's not correct, we may have added this class only because it
// was the enclosing class of another class, and in that case we won't have
// added its base classes yet.
- if (!Result.Classes.insert(Class))
+ if (!Result.Classes.insert(Class).second)
return;
// -- If T is a template-id, its associated namespaces and classes are
@@ -2073,7 +2113,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
if (!BaseType)
continue;
CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(BaseType->getDecl());
- if (Result.Classes.insert(BaseDecl)) {
+ if (Result.Classes.insert(BaseDecl).second) {
// Find the associated namespace for this base class.
DeclContext *BaseCtx = BaseDecl->getDeclContext();
CollectEnclosingNamespace(Result.Namespaces, BaseCtx);
@@ -2875,7 +2915,7 @@ public:
/// \brief Determine whether we have already visited this context
/// (and, if not, note that we are going to visit that context now).
bool visitedContext(DeclContext *Ctx) {
- return !VisitedContexts.insert(Ctx);
+ return !VisitedContexts.insert(Ctx).second;
}
bool alreadyVisitedContext(DeclContext *Ctx) {
@@ -3263,6 +3303,49 @@ static void LookupPotentialTypoResult(Sema &SemaRef,
bool isObjCIvarLookup,
bool FindHidden);
+/// \brief Check whether the declarations found for a typo correction are
+/// visible, and if none of them are, convert the correction to an 'import
+/// a module' correction.
+static void checkCorrectionVisibility(Sema &SemaRef, TypoCorrection &TC) {
+ if (TC.begin() == TC.end())
+ return;
+
+ TypoCorrection::decl_iterator DI = TC.begin(), DE = TC.end();
+
+ for (/**/; DI != DE; ++DI)
+ if (!LookupResult::isVisible(SemaRef, *DI))
+ break;
+ // Nothing to do if all decls are visible.
+ if (DI == DE)
+ return;
+
+ llvm::SmallVector<NamedDecl*, 4> NewDecls(TC.begin(), DI);
+ bool AnyVisibleDecls = !NewDecls.empty();
+
+ for (/**/; DI != DE; ++DI) {
+ NamedDecl *VisibleDecl = *DI;
+ if (!LookupResult::isVisible(SemaRef, *DI))
+ VisibleDecl = findAcceptableDecl(SemaRef, *DI);
+
+ if (VisibleDecl) {
+ if (!AnyVisibleDecls) {
+ // Found a visible decl, discard all hidden ones.
+ AnyVisibleDecls = true;
+ NewDecls.clear();
+ }
+ NewDecls.push_back(VisibleDecl);
+ } else if (!AnyVisibleDecls && !(*DI)->isModulePrivate())
+ NewDecls.push_back(*DI);
+ }
+
+ if (NewDecls.empty())
+ TC = TypoCorrection();
+ else {
+ TC.setCorrectionDecls(NewDecls);
+ TC.setRequiresImport(!AnyVisibleDecls);
+ }
+}
+
// Fill the supplied vector with the IdentifierInfo pointers for each piece of
// the given NestedNameSpecifier (i.e. given a NestedNameSpecifier "foo::bar::",
// fill the vector with the IdentifierInfo pointers for "foo" and "bar").
@@ -3297,6 +3380,7 @@ static void getNestedNameSpecifierIdentifiers(
break;
case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
return;
}
@@ -3304,157 +3388,6 @@ static void getNestedNameSpecifierIdentifiers(
Identifiers.push_back(II);
}
-namespace {
-
-static const unsigned MaxTypoDistanceResultSets = 5;
-
-class TypoCorrectionConsumer : public VisibleDeclConsumer {
- typedef SmallVector<TypoCorrection, 1> TypoResultList;
- typedef llvm::StringMap<TypoResultList> TypoResultsMap;
- typedef std::map<unsigned, TypoResultsMap> TypoEditDistanceMap;
-
-public:
- explicit TypoCorrectionConsumer(Sema &SemaRef,
- const DeclarationNameInfo &TypoName,
- Sema::LookupNameKind LookupKind,
- Scope *S, CXXScopeSpec *SS,
- CorrectionCandidateCallback &CCC,
- DeclContext *MemberContext,
- bool EnteringContext)
- : Typo(TypoName.getName().getAsIdentifierInfo()), SemaRef(SemaRef), S(S),
- SS(SS), CorrectionValidator(CCC), MemberContext(MemberContext),
- Result(SemaRef, TypoName, LookupKind),
- Namespaces(SemaRef.Context, SemaRef.CurContext, SS),
- EnteringContext(EnteringContext), SearchNamespaces(false) {
- Result.suppressDiagnostics();
- }
-
- bool includeHiddenDecls() const override { return true; }
-
- // Methods for adding potential corrections to the consumer.
- void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
- bool InBaseClass) override;
- void FoundName(StringRef Name);
- void addKeywordResult(StringRef Keyword);
- void addCorrection(TypoCorrection Correction);
-
- bool empty() const { return CorrectionResults.empty(); }
-
- /// \brief Return the list of TypoCorrections for the given identifier from
- /// the set of corrections that have the closest edit distance, if any.
- TypoResultList &operator[](StringRef Name) {
- return CorrectionResults.begin()->second[Name];
- }
-
- /// \brief Return the edit distance of the corrections that have the
- /// closest/best edit distance from the original typop.
- unsigned getBestEditDistance(bool Normalized) {
- if (CorrectionResults.empty())
- return (std::numeric_limits<unsigned>::max)();
-
- unsigned BestED = CorrectionResults.begin()->first;
- return Normalized ? TypoCorrection::NormalizeEditDistance(BestED) : BestED;
- }
-
- /// \brief Set-up method to add to the consumer the set of namespaces to use
- /// in performing corrections to nested name specifiers. This method also
- /// implicitly adds all of the known classes in the current AST context to the
- /// to the consumer for correcting nested name specifiers.
- void
- addNamespaces(const llvm::MapVector<NamespaceDecl *, bool> &KnownNamespaces);
-
- /// \brief Return the next typo correction that passes all internal filters
- /// and is deemed valid by the consumer's CorrectionCandidateCallback,
- /// starting with the corrections that have the closest edit distance. An
- /// empty TypoCorrection is returned once no more viable corrections remain
- /// in the consumer.
- TypoCorrection getNextCorrection();
-
-private:
- class NamespaceSpecifierSet {
- struct SpecifierInfo {
- DeclContext* DeclCtx;
- NestedNameSpecifier* NameSpecifier;
- unsigned EditDistance;
- };
-
- typedef SmallVector<DeclContext*, 4> DeclContextList;
- typedef SmallVector<SpecifierInfo, 16> SpecifierInfoList;
-
- ASTContext &Context;
- DeclContextList CurContextChain;
- std::string CurNameSpecifier;
- SmallVector<const IdentifierInfo*, 4> CurContextIdentifiers;
- SmallVector<const IdentifierInfo*, 4> CurNameSpecifierIdentifiers;
- bool isSorted;
-
- SpecifierInfoList Specifiers;
- llvm::SmallSetVector<unsigned, 4> Distances;
- llvm::DenseMap<unsigned, SpecifierInfoList> DistanceMap;
-
- /// \brief Helper for building the list of DeclContexts between the current
- /// context and the top of the translation unit
- static DeclContextList buildContextChain(DeclContext *Start);
-
- void sortNamespaces();
-
- unsigned buildNestedNameSpecifier(DeclContextList &DeclChain,
- NestedNameSpecifier *&NNS);
-
- public:
- NamespaceSpecifierSet(ASTContext &Context, DeclContext *CurContext,
- CXXScopeSpec *CurScopeSpec);
-
- /// \brief Add the DeclContext (a namespace or record) to the set, computing
- /// the corresponding NestedNameSpecifier and its distance in the process.
- void addNameSpecifier(DeclContext *Ctx);
-
- typedef SpecifierInfoList::iterator iterator;
- iterator begin() {
- if (!isSorted) sortNamespaces();
- return Specifiers.begin();
- }
- iterator end() { return Specifiers.end(); }
- };
-
- void addName(StringRef Name, NamedDecl *ND,
- NestedNameSpecifier *NNS = nullptr, bool isKeyword = false);
-
- /// \brief Find any visible decls for the given typo correction candidate.
- /// If none are found, it to the set of candidates for which qualified lookups
- /// will be performed to find possible nested name specifier changes.
- bool resolveCorrection(TypoCorrection &Candidate);
-
- /// \brief Perform qualified lookups on the queued set of typo correction
- /// candidates and add the nested name specifier changes to each candidate if
- /// a lookup succeeds (at which point the candidate will be returned to the
- /// main pool of potential corrections).
- void performQualifiedLookups();
-
- /// \brief The name written that is a typo in the source.
- IdentifierInfo *Typo;
-
- /// \brief The results found that have the smallest edit distance
- /// found (so far) with the typo name.
- ///
- /// The pointer value being set to the current DeclContext indicates
- /// whether there is a keyword with this name.
- TypoEditDistanceMap CorrectionResults;
-
- Sema &SemaRef;
- Scope *S;
- CXXScopeSpec *SS;
- CorrectionCandidateCallback &CorrectionValidator;
- DeclContext *MemberContext;
- LookupResult Result;
- NamespaceSpecifierSet Namespaces;
- SmallVector<TypoCorrection, 2> QualifiedResults;
- bool EnteringContext;
- bool SearchNamespaces;
-};
-
-}
-
void TypoCorrectionConsumer::FoundDecl(NamedDecl *ND, NamedDecl *Hiding,
DeclContext *Ctx, bool InBaseClass) {
// Don't consider hidden names for typo correction.
@@ -3506,9 +3439,12 @@ void TypoCorrectionConsumer::addName(StringRef Name, NamedDecl *ND,
TypoCorrection TC(&SemaRef.Context.Idents.get(Name), ND, NNS, ED);
if (isKeyword) TC.makeKeyword();
+ TC.setCorrectionRange(nullptr, Result.getLookupNameInfo());
addCorrection(TC);
}
+static const unsigned MaxTypoDistanceResultSets = 5;
+
void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
StringRef TypoStr = Typo->getName();
StringRef Name = Correction.getCorrectionAsIdentifierInfo()->getName();
@@ -3521,9 +3457,11 @@ void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
return;
// If the correction is resolved but is not viable, ignore it.
- if (Correction.isResolved() &&
- !isCandidateViable(CorrectionValidator, Correction))
- return;
+ if (Correction.isResolved()) {
+ checkCorrectionVisibility(SemaRef, Correction);
+ if (!Correction || !isCandidateViable(*CorrectionValidator, Correction))
+ return;
+ }
TypoResultList &CList =
CorrectionResults[Correction.getEditDistance(false)][Name];
@@ -3577,7 +3515,11 @@ void TypoCorrectionConsumer::addNamespaces(
}
}
-TypoCorrection TypoCorrectionConsumer::getNextCorrection() {
+const TypoCorrection &TypoCorrectionConsumer::getNextCorrection() {
+ if (++CurrentTCIndex < ValidatedCorrections.size())
+ return ValidatedCorrections[CurrentTCIndex];
+
+ CurrentTCIndex = ValidatedCorrections.size();
while (!CorrectionResults.empty()) {
auto DI = CorrectionResults.begin();
if (DI->second.empty()) {
@@ -3593,20 +3535,22 @@ TypoCorrection TypoCorrectionConsumer::getNextCorrection() {
}
TypoCorrection TC = RI->second.pop_back_val();
- if (TC.isResolved() || resolveCorrection(TC))
- return TC;
+ if (TC.isResolved() || TC.requiresImport() || resolveCorrection(TC)) {
+ ValidatedCorrections.push_back(TC);
+ return ValidatedCorrections[CurrentTCIndex];
+ }
}
- return TypoCorrection();
+ return ValidatedCorrections[0]; // The empty correction.
}
bool TypoCorrectionConsumer::resolveCorrection(TypoCorrection &Candidate) {
IdentifierInfo *Name = Candidate.getCorrectionAsIdentifierInfo();
DeclContext *TempMemberContext = MemberContext;
- CXXScopeSpec *TempSS = SS;
+ CXXScopeSpec *TempSS = SS.get();
retry_lookup:
LookupPotentialTypoResult(SemaRef, Result, Name, S, TempSS, TempMemberContext,
EnteringContext,
- CorrectionValidator.IsObjCIvarLookup,
+ CorrectionValidator->IsObjCIvarLookup,
Name == Typo && !Candidate.WillReplaceSpecifier());
switch (Result.getResultKind()) {
case LookupResult::NotFound:
@@ -3620,7 +3564,7 @@ retry_lookup:
}
if (TempMemberContext) {
if (SS && !TempSS)
- TempSS = SS;
+ TempSS = SS.get();
TempMemberContext = nullptr;
goto retry_lookup;
}
@@ -3637,11 +3581,13 @@ retry_lookup:
// Store all of the Decls for overloaded symbols
for (auto *TRD : Result)
Candidate.addCorrectionDecl(TRD);
- if (!isCandidateViable(CorrectionValidator, Candidate)) {
+ checkCorrectionVisibility(SemaRef, Candidate);
+ if (!isCandidateViable(*CorrectionValidator, Candidate)) {
if (SearchNamespaces)
QualifiedResults.push_back(Candidate);
break;
}
+ Candidate.setCorrectionRange(TempSS, Result.getLookupNameInfo());
return true;
}
return false;
@@ -3707,8 +3653,10 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
TRD.getPair()) == Sema::AR_accessible)
TC.addCorrectionDecl(*TRD);
}
- if (TC.isResolved())
+ if (TC.isResolved()) {
+ TC.setCorrectionRange(SS.get(), Result.getLookupNameInfo());
addCorrection(TC);
+ }
break;
}
case LookupResult::NotFound:
@@ -3856,8 +3804,8 @@ void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
SmallVector<const IdentifierInfo*, 4> NewNameSpecifierIdentifiers;
getNestedNameSpecifierIdentifiers(NNS, NewNameSpecifierIdentifiers);
NumSpecifiers = llvm::ComputeEditDistance(
- ArrayRef<const IdentifierInfo *>(CurNameSpecifierIdentifiers),
- ArrayRef<const IdentifierInfo *>(NewNameSpecifierIdentifiers));
+ llvm::makeArrayRef(CurNameSpecifierIdentifiers),
+ llvm::makeArrayRef(NewNameSpecifierIdentifiers));
}
isSorted = false;
@@ -3972,6 +3920,13 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
if (SemaRef.getLangOpts().GNUMode)
Consumer.addKeywordResult("typeof");
+ } else if (CCC.WantFunctionLikeCasts) {
+ static const char *const CastableTypeSpecs[] = {
+ "char", "double", "float", "int", "long", "short",
+ "signed", "unsigned", "void"
+ };
+ for (auto *kw : CastableTypeSpecs)
+ Consumer.addKeywordResult(kw);
}
if (CCC.WantCXXNamedCasts && SemaRef.getLangOpts().CPlusPlus) {
@@ -4063,212 +4018,96 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
}
}
-/// \brief Check whether the declarations found for a typo correction are
-/// visible, and if none of them are, convert the correction to an 'import
-/// a module' correction.
-static void checkCorrectionVisibility(Sema &SemaRef, TypoCorrection &TC) {
- if (TC.begin() == TC.end())
- return;
-
- TypoCorrection::decl_iterator DI = TC.begin(), DE = TC.end();
-
- for (/**/; DI != DE; ++DI)
- if (!LookupResult::isVisible(SemaRef, *DI))
- break;
- // Nothing to do if all decls are visible.
- if (DI == DE)
- return;
-
- llvm::SmallVector<NamedDecl*, 4> NewDecls(TC.begin(), DI);
- bool AnyVisibleDecls = !NewDecls.empty();
-
- for (/**/; DI != DE; ++DI) {
- NamedDecl *VisibleDecl = *DI;
- if (!LookupResult::isVisible(SemaRef, *DI))
- VisibleDecl = findAcceptableDecl(SemaRef, *DI);
-
- if (VisibleDecl) {
- if (!AnyVisibleDecls) {
- // Found a visible decl, discard all hidden ones.
- AnyVisibleDecls = true;
- NewDecls.clear();
- }
- NewDecls.push_back(VisibleDecl);
- } else if (!AnyVisibleDecls && !(*DI)->isModulePrivate())
- NewDecls.push_back(*DI);
- }
-
- if (NewDecls.empty())
- TC = TypoCorrection();
- else {
- TC.setCorrectionDecls(NewDecls);
- TC.setRequiresImport(!AnyVisibleDecls);
- }
-}
-
-/// \brief Try to "correct" a typo in the source code by finding
-/// visible declarations whose names are similar to the name that was
-/// present in the source code.
-///
-/// \param TypoName the \c DeclarationNameInfo structure that contains
-/// the name that was present in the source code along with its location.
-///
-/// \param LookupKind the name-lookup criteria used to search for the name.
-///
-/// \param S the scope in which name lookup occurs.
-///
-/// \param SS the nested-name-specifier that precedes the name we're
-/// looking for, if present.
-///
-/// \param CCC A CorrectionCandidateCallback object that provides further
-/// validation of typo correction candidates. It also provides flags for
-/// determining the set of keywords permitted.
-///
-/// \param MemberContext if non-NULL, the context in which to look for
-/// a member access expression.
-///
-/// \param EnteringContext whether we're entering the context described by
-/// the nested-name-specifier SS.
-///
-/// \param OPT when non-NULL, the search for visible declarations will
-/// also walk the protocols in the qualified interfaces of \p OPT.
-///
-/// \returns a \c TypoCorrection containing the corrected name if a correction
-/// was found, along with information such as the \c NamedDecl where the
-/// corrected name was declared, and any additional \c NestedNameSpecifier
-/// needed to access it (C++ only). The \c TypoCorrection is empty if there is
-/// no correction.
-TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
- Sema::LookupNameKind LookupKind,
- Scope *S, CXXScopeSpec *SS,
- CorrectionCandidateCallback &CCC,
- CorrectTypoKind Mode,
- DeclContext *MemberContext,
- bool EnteringContext,
- const ObjCObjectPointerType *OPT,
- bool RecordFailure) {
- // Always let the ExternalSource have the first chance at correction, even
- // if we would otherwise have given up.
- if (ExternalSource) {
- if (TypoCorrection Correction = ExternalSource->CorrectTypo(
- TypoName, LookupKind, S, SS, CCC, MemberContext, EnteringContext, OPT))
- return Correction;
- }
+std::unique_ptr<TypoCorrectionConsumer> Sema::makeTypoCorrectionConsumer(
+ const DeclarationNameInfo &TypoName, Sema::LookupNameKind LookupKind,
+ Scope *S, CXXScopeSpec *SS,
+ std::unique_ptr<CorrectionCandidateCallback> CCC,
+ DeclContext *MemberContext, bool EnteringContext,
+ const ObjCObjectPointerType *OPT, bool ErrorRecovery) {
if (Diags.hasFatalErrorOccurred() || !getLangOpts().SpellChecking ||
DisableTypoCorrection)
- return TypoCorrection();
+ return nullptr;
// In Microsoft mode, don't perform typo correction in a template member
// function dependent context because it interferes with the "lookup into
// dependent bases of class templates" feature.
if (getLangOpts().MSVCCompat && CurContext->isDependentContext() &&
isa<CXXMethodDecl>(CurContext))
- return TypoCorrection();
+ return nullptr;
// We only attempt to correct typos for identifiers.
IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
if (!Typo)
- return TypoCorrection();
+ return nullptr;
// If the scope specifier itself was invalid, don't try to correct
// typos.
if (SS && SS->isInvalid())
- return TypoCorrection();
+ return nullptr;
// Never try to correct typos during template deduction or
// instantiation.
if (!ActiveTemplateInstantiations.empty())
- return TypoCorrection();
+ return nullptr;
// Don't try to correct 'super'.
if (S && S->isInObjcMethodScope() && Typo == getSuperIdentifier())
- return TypoCorrection();
+ return nullptr;
// Abort if typo correction already failed for this specific typo.
IdentifierSourceLocations::iterator locs = TypoCorrectionFailures.find(Typo);
if (locs != TypoCorrectionFailures.end() &&
locs->second.count(TypoName.getLoc()))
- return TypoCorrection();
+ return nullptr;
// Don't try to correct the identifier "vector" when in AltiVec mode.
// TODO: Figure out why typo correction misbehaves in this case, fix it, and
// remove this workaround.
if (getLangOpts().AltiVec && Typo->isStr("vector"))
- return TypoCorrection();
+ return nullptr;
+
+ // Provide a stop gap for files that are just seriously broken. Trying
+ // to correct all typos can turn into a HUGE performance penalty, causing
+ // some files to take minutes to get rejected by the parser.
+ unsigned Limit = getDiagnostics().getDiagnosticOptions().SpellCheckingLimit;
+ if (Limit && TyposCorrected >= Limit)
+ return nullptr;
+ ++TyposCorrected;
// If we're handling a missing symbol error, using modules, and the
// special search all modules option is used, look for a missing import.
- if ((Mode == CTK_ErrorRecovery) && getLangOpts().Modules &&
+ if (ErrorRecovery && getLangOpts().Modules &&
getLangOpts().ModulesSearchAll) {
// The following has the side effect of loading the missing module.
getModuleLoader().lookupMissingImports(Typo->getName(),
TypoName.getLocStart());
}
- TypoCorrectionConsumer Consumer(*this, TypoName, LookupKind, S, SS, CCC,
- MemberContext, EnteringContext);
-
- // If a callback object considers an empty typo correction candidate to be
- // viable, assume it does not do any actual validation of the candidates.
- TypoCorrection EmptyCorrection;
- bool ValidatingCallback = !isCandidateViable(CCC, EmptyCorrection);
+ CorrectionCandidateCallback &CCCRef = *CCC;
+ auto Consumer = llvm::make_unique<TypoCorrectionConsumer>(
+ *this, TypoName, LookupKind, S, SS, std::move(CCC), MemberContext,
+ EnteringContext);
// Perform name lookup to find visible, similarly-named entities.
bool IsUnqualifiedLookup = false;
DeclContext *QualifiedDC = MemberContext;
if (MemberContext) {
- LookupVisibleDecls(MemberContext, LookupKind, Consumer);
+ LookupVisibleDecls(MemberContext, LookupKind, *Consumer);
// Look in qualified interfaces.
if (OPT) {
for (auto *I : OPT->quals())
- LookupVisibleDecls(I, LookupKind, Consumer);
+ LookupVisibleDecls(I, LookupKind, *Consumer);
}
} else if (SS && SS->isSet()) {
QualifiedDC = computeDeclContext(*SS, EnteringContext);
if (!QualifiedDC)
- return TypoCorrection();
+ return nullptr;
- // Provide a stop gap for files that are just seriously broken. Trying
- // to correct all typos can turn into a HUGE performance penalty, causing
- // some files to take minutes to get rejected by the parser.
- if (TyposCorrected + UnqualifiedTyposCorrected.size() >= 20)
- return TypoCorrection();
- ++TyposCorrected;
-
- LookupVisibleDecls(QualifiedDC, LookupKind, Consumer);
+ LookupVisibleDecls(QualifiedDC, LookupKind, *Consumer);
} else {
IsUnqualifiedLookup = true;
- UnqualifiedTyposCorrectedMap::iterator Cached
- = UnqualifiedTyposCorrected.find(Typo);
- if (Cached != UnqualifiedTyposCorrected.end()) {
- // Add the cached value, unless it's a keyword or fails validation. In the
- // keyword case, we'll end up adding the keyword below.
- if (Cached->second) {
- if (!Cached->second.isKeyword() &&
- isCandidateViable(CCC, Cached->second)) {
- // Do not use correction that is unaccessible in the given scope.
- NamedDecl *CorrectionDecl = Cached->second.getCorrectionDecl();
- DeclarationNameInfo NameInfo(CorrectionDecl->getDeclName(),
- CorrectionDecl->getLocation());
- LookupResult R(*this, NameInfo, LookupOrdinaryName);
- if (LookupName(R, S))
- Consumer.addCorrection(Cached->second);
- }
- } else {
- // Only honor no-correction cache hits when a callback that will validate
- // correction candidates is not being used.
- if (!ValidatingCallback)
- return TypoCorrection();
- }
- }
- if (Cached == UnqualifiedTyposCorrected.end()) {
- // Provide a stop gap for files that are just seriously broken. Trying
- // to correct all typos can turn into a HUGE performance penalty, causing
- // some files to take minutes to get rejected by the parser.
- if (TyposCorrected + UnqualifiedTyposCorrected.size() >= 20)
- return TypoCorrection();
- }
}
// Determine whether we are going to search in the various namespaces for
@@ -4276,17 +4115,13 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
bool SearchNamespaces
= getLangOpts().CPlusPlus &&
(IsUnqualifiedLookup || (SS && SS->isSet()));
- // In a few cases we *only* want to search for corrections based on just
- // adding or changing the nested name specifier.
- unsigned TypoLen = Typo->getName().size();
- bool AllowOnlyNNSChanges = TypoLen < 3;
if (IsUnqualifiedLookup || SearchNamespaces) {
// For unqualified lookup, look through all of the names that we have
// seen in this translation unit.
// FIXME: Re-add the ability to skip very unlikely potential corrections.
for (const auto &I : Context.Idents)
- Consumer.FoundName(I.getKey());
+ Consumer->FoundName(I.getKey());
// Walk through identifiers in external identifier sources.
// FIXME: Re-add the ability to skip very unlikely potential corrections.
@@ -4298,24 +4133,12 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
if (Name.empty())
break;
- Consumer.FoundName(Name);
+ Consumer->FoundName(Name);
} while (true);
}
}
- AddKeywordsToConsumer(*this, Consumer, S, CCC, SS && SS->isNotEmpty());
-
- // If we haven't found anything, we're done.
- if (Consumer.empty())
- return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure,
- IsUnqualifiedLookup);
-
- // Make sure the best edit distance (prior to adding any namespace qualifiers)
- // is not more than about a third of the length of the typo's identifier.
- unsigned ED = Consumer.getBestEditDistance(true);
- if (ED > 0 && TypoLen / ED < 3)
- return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure,
- IsUnqualifiedLookup);
+ AddKeywordsToConsumer(*this, *Consumer, S, CCCRef, SS && SS->isNotEmpty());
// Build the NestedNameSpecifiers for the KnownNamespaces, if we're going
// to search those namespaces.
@@ -4329,22 +4152,99 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
KnownNamespaces[N] = true;
}
- Consumer.addNamespaces(KnownNamespaces);
+ Consumer->addNamespaces(KnownNamespaces);
}
- TypoCorrection BestTC = Consumer.getNextCorrection();
- TypoCorrection SecondBestTC = Consumer.getNextCorrection();
+ return Consumer;
+}
+
+/// \brief Try to "correct" a typo in the source code by finding
+/// visible declarations whose names are similar to the name that was
+/// present in the source code.
+///
+/// \param TypoName the \c DeclarationNameInfo structure that contains
+/// the name that was present in the source code along with its location.
+///
+/// \param LookupKind the name-lookup criteria used to search for the name.
+///
+/// \param S the scope in which name lookup occurs.
+///
+/// \param SS the nested-name-specifier that precedes the name we're
+/// looking for, if present.
+///
+/// \param CCC A CorrectionCandidateCallback object that provides further
+/// validation of typo correction candidates. It also provides flags for
+/// determining the set of keywords permitted.
+///
+/// \param MemberContext if non-NULL, the context in which to look for
+/// a member access expression.
+///
+/// \param EnteringContext whether we're entering the context described by
+/// the nested-name-specifier SS.
+///
+/// \param OPT when non-NULL, the search for visible declarations will
+/// also walk the protocols in the qualified interfaces of \p OPT.
+///
+/// \returns a \c TypoCorrection containing the corrected name if a correction
+/// was found, along with information such as the \c NamedDecl where the
+/// corrected name was declared, and any additional \c NestedNameSpecifier
+/// needed to access it (C++ only). The \c TypoCorrection is empty if there is
+/// no correction.
+TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
+ Sema::LookupNameKind LookupKind,
+ Scope *S, CXXScopeSpec *SS,
+ std::unique_ptr<CorrectionCandidateCallback> CCC,
+ CorrectTypoKind Mode,
+ DeclContext *MemberContext,
+ bool EnteringContext,
+ const ObjCObjectPointerType *OPT,
+ bool RecordFailure) {
+ assert(CCC && "CorrectTypo requires a CorrectionCandidateCallback");
+
+ // Always let the ExternalSource have the first chance at correction, even
+ // if we would otherwise have given up.
+ if (ExternalSource) {
+ if (TypoCorrection Correction = ExternalSource->CorrectTypo(
+ TypoName, LookupKind, S, SS, *CCC, MemberContext, EnteringContext, OPT))
+ return Correction;
+ }
+
+ // Ugly hack equivalent to CTC == CTC_ObjCMessageReceiver;
+ // WantObjCSuper is only true for CTC_ObjCMessageReceiver and for
+ // some instances of CTC_Unknown, while WantRemainingKeywords is true
+ // for CTC_Unknown but not for CTC_ObjCMessageReceiver.
+ bool ObjCMessageReceiver = CCC->WantObjCSuper && !CCC->WantRemainingKeywords;
+
+ IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
+ auto Consumer = makeTypoCorrectionConsumer(
+ TypoName, LookupKind, S, SS, std::move(CCC), MemberContext,
+ EnteringContext, OPT, Mode == CTK_ErrorRecovery);
+
+ if (!Consumer)
+ return TypoCorrection();
+
+ // If we haven't found anything, we're done.
+ if (Consumer->empty())
+ return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
+
+ // Make sure the best edit distance (prior to adding any namespace qualifiers)
+ // is not more than about a third of the length of the typo's identifier.
+ unsigned ED = Consumer->getBestEditDistance(true);
+ unsigned TypoLen = Typo->getName().size();
+ if (ED > 0 && TypoLen / ED < 3)
+ return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
+
+ TypoCorrection BestTC = Consumer->getNextCorrection();
+ TypoCorrection SecondBestTC = Consumer->getNextCorrection();
if (!BestTC)
return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
ED = BestTC.getEditDistance();
- if (!AllowOnlyNNSChanges && ED > 0 && TypoLen / ED < 3) {
+ if (TypoLen >= 3 && ED > 0 && TypoLen / ED < 3) {
// If this was an unqualified lookup and we believe the callback
// object wouldn't have filtered out possible corrections, note
// that no correction was found.
- return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure,
- IsUnqualifiedLookup && !ValidatingCallback);
+ return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
}
// If only a single name remains, return that result.
@@ -4357,28 +4257,19 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
if (ED == 0 && Result.isKeyword())
return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
- // Record the correction for unqualified lookup.
- if (IsUnqualifiedLookup)
- UnqualifiedTyposCorrected[Typo] = Result;
-
TypoCorrection TC = Result;
TC.setCorrectionRange(SS, TypoName);
checkCorrectionVisibility(*this, TC);
return TC;
- }
- // Ugly hack equivalent to CTC == CTC_ObjCMessageReceiver;
- // WantObjCSuper is only true for CTC_ObjCMessageReceiver and for
- // some instances of CTC_Unknown, while WantRemainingKeywords is true
- // for CTC_Unknown but not for CTC_ObjCMessageReceiver.
- else if (SecondBestTC && CCC.WantObjCSuper && !CCC.WantRemainingKeywords) {
+ } else if (SecondBestTC && ObjCMessageReceiver) {
// Prefer 'super' when we're completing in a message-receiver
// context.
if (BestTC.getCorrection().getAsString() != "super") {
if (SecondBestTC.getCorrection().getAsString() == "super")
BestTC = SecondBestTC;
- else if (Consumer["super"].front().isKeyword())
- BestTC = Consumer["super"].front();
+ else if ((*Consumer)["super"].front().isKeyword())
+ BestTC = (*Consumer)["super"].front();
}
// Don't correct to a keyword that's the same as the typo; the keyword
// wasn't actually in scope.
@@ -4386,10 +4277,6 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
BestTC.getCorrection().getAsString() != "super")
return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
- // Record the correction for unqualified lookup.
- if (IsUnqualifiedLookup)
- UnqualifiedTyposCorrected[Typo] = BestTC;
-
BestTC.setCorrectionRange(SS, TypoName);
return BestTC;
}
@@ -4397,8 +4284,75 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
// Record the failure's location if needed and return an empty correction. If
// this was an unqualified lookup and we believe the callback object did not
// filter out possible corrections, also cache the failure for the typo.
- return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure,
- IsUnqualifiedLookup && !ValidatingCallback);
+ return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
+}
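The main behavioural change for callers is that the CorrectionCandidateCallback is now passed by std::unique_ptr and owned by the TypoCorrectionConsumer. A minimal sketch of the updated calling convention, modelled on the Sema::ActOnOpenMPIdExpression call site adjusted later in this patch (the surrounding function and the chosen diagnostic are illustrative only):

    // Sketch only: assumes this runs inside a Sema member function that has
    // a DeclarationNameInfo 'Id' and a Scope* 'CurScope' in scope.
    if (TypoCorrection Corrected = CorrectTypo(
            Id, LookupOrdinaryName, CurScope, /*SS=*/nullptr,
            llvm::make_unique<VarDeclFilterCCC>(*this), CTK_ErrorRecovery)) {
      // The callback is handed over by unique_ptr; ownership moves into the
      // consumer created by makeTypoCorrectionConsumer.
      diagnoseTypo(Corrected, PDiag(diag::err_undeclared_var_use_suggest)
                                  << Id.getName());
    }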
+
+/// \brief Try to "correct" a typo in the source code by finding
+/// visible declarations whose names are similar to the name that was
+/// present in the source code.
+///
+/// \param TypoName the \c DeclarationNameInfo structure that contains
+/// the name that was present in the source code along with its location.
+///
+/// \param LookupKind the name-lookup criteria used to search for the name.
+///
+/// \param S the scope in which name lookup occurs.
+///
+/// \param SS the nested-name-specifier that precedes the name we're
+/// looking for, if present.
+///
+/// \param CCC A CorrectionCandidateCallback object that provides further
+/// validation of typo correction candidates. It also provides flags for
+/// determining the set of keywords permitted.
+///
+/// \param TDG A TypoDiagnosticGenerator functor that will be used to print
+/// diagnostics when the actual typo correction is attempted.
+///
+/// \param TRC A TypoRecoveryCallback functor that will be used to build an
+/// Expr from a typo correction candidate.
+///
+/// \param MemberContext if non-NULL, the context in which to look for
+/// a member access expression.
+///
+/// \param EnteringContext whether we're entering the context described by
+/// the nested-name-specifier SS.
+///
+/// \param OPT when non-NULL, the search for visible declarations will
+/// also walk the protocols in the qualified interfaces of \p OPT.
+///
+/// \returns a new \c TypoExpr that will later be replaced in the AST with an
+/// Expr representing the result of performing typo correction, or nullptr if
+/// typo correction is not possible. If nullptr is returned, no diagnostics will
+/// be emitted and it is the responsibility of the caller to emit any that are
+/// needed.
+TypoExpr *Sema::CorrectTypoDelayed(
+ const DeclarationNameInfo &TypoName, Sema::LookupNameKind LookupKind,
+ Scope *S, CXXScopeSpec *SS,
+ std::unique_ptr<CorrectionCandidateCallback> CCC,
+ TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode,
+ DeclContext *MemberContext, bool EnteringContext,
+ const ObjCObjectPointerType *OPT) {
+ assert(CCC && "CorrectTypoDelayed requires a CorrectionCandidateCallback");
+
+ TypoCorrection Empty;
+ auto Consumer = makeTypoCorrectionConsumer(
+ TypoName, LookupKind, S, SS, std::move(CCC), MemberContext,
+ EnteringContext, OPT,
+ /*SearchModules=*/(Mode == CTK_ErrorRecovery) && getLangOpts().Modules &&
+ getLangOpts().ModulesSearchAll);
+
+ if (!Consumer || Consumer->empty())
+ return nullptr;
+
+ // Make sure the best edit distance (prior to adding any namespace qualifiers)
+ // is not more than about a third of the length of the typo's identifier.
+ unsigned ED = Consumer->getBestEditDistance(true);
+ IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
+ if (ED > 0 && Typo->getName().size() / ED < 3)
+ return nullptr;
+
+ ExprEvalContexts.back().NumTypos++;
+ return createDelayedTypo(std::move(Consumer), std::move(TDG), std::move(TRC));
}
void TypoCorrection::addCorrectionDecl(NamedDecl *CDecl) {
@@ -4425,7 +4379,8 @@ std::string TypoCorrection::getAsString(const LangOptions &LO) const {
return CorrectionName.getAsString();
}
-bool CorrectionCandidateCallback::ValidateCandidate(const TypoCorrection &candidate) {
+bool CorrectionCandidateCallback::ValidateCandidate(
+ const TypoCorrection &candidate) {
if (!candidate.isResolved())
return true;
@@ -4461,7 +4416,8 @@ FunctionCallFilterCCC::FunctionCallFilterCCC(Sema &SemaRef, unsigned NumArgs,
MemberExpr *ME)
: NumArgs(NumArgs), HasExplicitTemplateArgs(HasExplicitTemplateArgs),
CurContext(SemaRef.CurContext), MemberFn(ME) {
- WantTypeSpecifiers = SemaRef.getLangOpts().CPlusPlus;
+ WantTypeSpecifiers = false;
+ WantFunctionLikeCasts = SemaRef.getLangOpts().CPlusPlus && NumArgs == 1;
WantRemainingKeywords = false;
}
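This hunk cooperates with the AddKeywordsToConsumer change earlier in the patch: for a C++ call-like expression with exactly one argument, FunctionCallFilterCCC now requests function-like casts, and the builtin type specifiers become correction candidates. An illustrative, deliberately ill-formed C++ input of the kind this targets (not taken from the clang test suite):

    // 'flaot(v)' parses as a call with one argument, so the new
    // WantFunctionLikeCasts path allows the function-style cast 'float(v)'
    // to be offered as a correction.
    double scale(int v) {
      return flaot(v) * 0.5; // typo for float(v)
    }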
@@ -4596,3 +4552,26 @@ void Sema::diagnoseTypo(const TypoCorrection &Correction,
Diag(ChosenDecl->getLocation(), PrevNote)
<< CorrectedQuotedStr << (ErrorRecovery ? FixItHint() : FixTypo);
}
+
+TypoExpr *Sema::createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
+ TypoDiagnosticGenerator TDG,
+ TypoRecoveryCallback TRC) {
+ assert(TCC && "createDelayedTypo requires a valid TypoCorrectionConsumer");
+ auto TE = new (Context) TypoExpr(Context.DependentTy);
+ auto &State = DelayedTypos[TE];
+ State.Consumer = std::move(TCC);
+ State.DiagHandler = std::move(TDG);
+ State.RecoveryHandler = std::move(TRC);
+ return TE;
+}
+
+const Sema::TypoExprState &Sema::getTypoExprState(TypoExpr *TE) const {
+ auto Entry = DelayedTypos.find(TE);
+ assert(Entry != DelayedTypos.end() &&
+ "Failed to get the state for a TypoExpr!");
+ return Entry->second;
+}
+
+void Sema::clearDelayedTypo(TypoExpr *TE) {
+ DelayedTypos.erase(TE);
+}
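Taken together, these helpers define the lifecycle of a delayed typo correction: createDelayedTypo records the consumer and the two callbacks against a fresh TypoExpr, getTypoExprState retrieves that state later, and clearDelayedTypo drops the entry once the placeholder has been dealt with. A minimal sketch of the intended call order, assuming code running inside a Sema member function with a TypoExpr *TE returned by CorrectTypoDelayed:

    // Sketch only; not a real clang call site.
    const TypoExprState &State = getTypoExprState(TE); // asserts TE is known
    // State.Consumer holds the TypoCorrectionConsumer, State.DiagHandler the
    // TypoDiagnosticGenerator, and State.RecoveryHandler the
    // TypoRecoveryCallback registered by createDelayedTypo.
    (void)State;
    clearDelayedTypo(TE); // forget the state once TE has been replaced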
diff --git a/lib/Sema/SemaObjCProperty.cpp b/lib/Sema/SemaObjCProperty.cpp
index 8eb806ba301a..72b6020351fa 100644
--- a/lib/Sema/SemaObjCProperty.cpp
+++ b/lib/Sema/SemaObjCProperty.cpp
@@ -116,9 +116,9 @@ static unsigned deduceWeakPropertyFromType(Sema &S, QualType T) {
static void
CheckPropertyAgainstProtocol(Sema &S, ObjCPropertyDecl *Prop,
ObjCProtocolDecl *Proto,
- llvm::SmallPtrSet<ObjCProtocolDecl *, 16> &Known) {
+ llvm::SmallPtrSetImpl<ObjCProtocolDecl *> &Known) {
// Have we seen this protocol before?
- if (!Known.insert(Proto))
+ if (!Known.insert(Proto).second)
return;
// Look for a property with the same name.
@@ -1547,36 +1547,22 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
if (IMPDecl->getInstanceMethod(Prop->getSetterName()))
continue;
}
- // If property to be implemented in the super class, ignore.
- if (SuperPropMap[Prop->getIdentifier()]) {
- ObjCPropertyDecl *PropInSuperClass = SuperPropMap[Prop->getIdentifier()];
- if ((Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite) &&
- (PropInSuperClass->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_readonly) &&
- !IMPDecl->getInstanceMethod(Prop->getSetterName()) &&
- !IDecl->HasUserDeclaredSetterMethod(Prop)) {
- Diag(Prop->getLocation(), diag::warn_no_autosynthesis_property)
- << Prop->getIdentifier();
- Diag(PropInSuperClass->getLocation(), diag::note_property_declare);
- }
- continue;
- }
if (ObjCPropertyImplDecl *PID =
IMPDecl->FindPropertyImplIvarDecl(Prop->getIdentifier())) {
- if (PID->getPropertyDecl() != Prop) {
- Diag(Prop->getLocation(), diag::warn_no_autosynthesis_shared_ivar_property)
- << Prop->getIdentifier();
- if (!PID->getLocation().isInvalid())
- Diag(PID->getLocation(), diag::note_property_synthesize);
- }
+ Diag(Prop->getLocation(), diag::warn_no_autosynthesis_shared_ivar_property)
+ << Prop->getIdentifier();
+ if (!PID->getLocation().isInvalid())
+ Diag(PID->getLocation(), diag::note_property_synthesize);
continue;
}
+ ObjCPropertyDecl *PropInSuperClass = SuperPropMap[Prop->getIdentifier()];
if (ObjCProtocolDecl *Proto =
dyn_cast<ObjCProtocolDecl>(Prop->getDeclContext())) {
// We won't auto-synthesize properties declared in protocols.
// Suppress the warning if class's superclass implements property's
// getter and implements property's setter (if readwrite property).
- if (!SuperClassImplementsProperty(IDecl, Prop)) {
+ // Also suppress it if the property is going to be implemented in its
+ // superclass.
+ if (!SuperClassImplementsProperty(IDecl, Prop) && !PropInSuperClass) {
Diag(IMPDecl->getLocation(),
diag::warn_auto_synthesizing_protocol_property)
<< Prop << Proto;
@@ -1584,7 +1570,25 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
}
continue;
}
-
+ // If the property is to be implemented in the superclass, ignore it.
+ if (PropInSuperClass) {
+ if ((Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite) &&
+ (PropInSuperClass->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_readonly) &&
+ !IMPDecl->getInstanceMethod(Prop->getSetterName()) &&
+ !IDecl->HasUserDeclaredSetterMethod(Prop)) {
+ Diag(Prop->getLocation(), diag::warn_no_autosynthesis_property)
+ << Prop->getIdentifier();
+ Diag(PropInSuperClass->getLocation(), diag::note_property_declare);
+ }
+ else {
+ Diag(Prop->getLocation(), diag::warn_autosynthesis_property_in_superclass)
+ << Prop->getIdentifier();
+ Diag(PropInSuperClass->getLocation(), diag::note_property_declare);
+ Diag(IMPDecl->getLocation(), diag::note_while_in_implementation);
+ }
+ continue;
+ }
// We use invalid SourceLocations for the synthesized ivars since they
// aren't really synthesized at a particular location; they just exist.
// Saying that they are located at the @implementation isn't really going
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index 7f2af68e55d5..d72942a2ffec 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclOpenMP.h"
@@ -91,15 +92,17 @@ private:
DeclarationNameInfo DirectiveName;
Scope *CurScope;
SourceLocation ConstructLoc;
+ bool OrderedRegion;
+ SourceLocation InnerTeamsRegionLoc;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
: SharingMap(), AlignedMap(), DefaultAttr(DSA_unspecified),
Directive(DKind), DirectiveName(std::move(Name)), CurScope(CurScope),
- ConstructLoc(Loc) {}
+ ConstructLoc(Loc), OrderedRegion(false), InnerTeamsRegionLoc() {}
SharingMapTy()
: SharingMap(), AlignedMap(), DefaultAttr(DSA_unspecified),
Directive(OMPD_unknown), DirectiveName(), CurScope(nullptr),
- ConstructLoc() {}
+ ConstructLoc(), OrderedRegion(false), InnerTeamsRegionLoc() {}
};
typedef SmallVector<SharingMapTy, 64> StackTy;
@@ -194,13 +197,42 @@ public:
return isOpenMPThreadPrivate(DVar.CKind);
}
+ /// \brief Marks current region as ordered (it has an 'ordered' clause).
+ void setOrderedRegion(bool IsOrdered = true) {
+ Stack.back().OrderedRegion = IsOrdered;
+ }
+ /// \brief Returns true if the parent region is ordered (has an associated
+ /// 'ordered' clause), false otherwise.
+ bool isParentOrderedRegion() const {
+ if (Stack.size() > 2)
+ return Stack[Stack.size() - 2].OrderedRegion;
+ return false;
+ }
+
+ /// \brief Marks the current target region as one with a closely nested
+ /// teams region.
+ void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
+ if (Stack.size() > 2)
+ Stack[Stack.size() - 2].InnerTeamsRegionLoc = TeamsRegionLoc;
+ }
+ /// \brief Returns true if the current region has a closely nested teams
+ /// region.
+ bool hasInnerTeamsRegion() const {
+ return getInnerTeamsRegionLoc().isValid();
+ }
+ /// \brief Returns location of the nested teams region (if any).
+ SourceLocation getInnerTeamsRegionLoc() const {
+ if (Stack.size() > 1)
+ return Stack.back().InnerTeamsRegionLoc;
+ return SourceLocation();
+ }
+
Scope *getCurScope() const { return Stack.back().CurScope; }
Scope *getCurScope() { return Stack.back().CurScope; }
SourceLocation getConstructLoc() { return Stack.back().ConstructLoc; }
};
bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
return isOpenMPParallelDirective(DKind) || DKind == OMPD_task ||
- DKind == OMPD_unknown;
+ isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown;
}
} // namespace
@@ -213,7 +245,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator Iter,
// File-scope or namespace-scope variables referenced in called routines
// in the region are shared unless they appear in a threadprivate
// directive.
- if (!D->isFunctionOrMethodVarDecl())
+ if (!D->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(D))
DVar.CKind = OMPC_shared;
// OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
@@ -263,7 +295,8 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator Iter,
// In a parallel construct, if no default clause is present, these
// variables are shared.
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
- if (isOpenMPParallelDirective(DVar.DKind)) {
+ if (isOpenMPParallelDirective(DVar.DKind) ||
+ isOpenMPTeamsDirective(DVar.DKind)) {
DVar.CKind = OMPC_shared;
return DVar;
}
@@ -358,7 +391,8 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D, bool FromParent) {
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.1]
// Variables appearing in threadprivate directives are threadprivate.
- if (D->getTLSKind() != VarDecl::TLS_None) {
+ if (D->getTLSKind() != VarDecl::TLS_None ||
+ D->getStorageClass() == SC_Register) {
DVar.CKind = OMPC_threadprivate;
return DVar;
}
@@ -380,8 +414,10 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D, bool FromParent) {
StartI = std::next(StartI);
}
if (!isParallelOrTaskRegion(Kind)) {
- if (isOpenMPLocal(D, StartI) && D->isLocalVarDecl() &&
- (D->getStorageClass() == SC_Auto || D->getStorageClass() == SC_None)) {
+ if (isOpenMPLocal(D, StartI) &&
+ ((D->isLocalVarDecl() && (D->getStorageClass() == SC_Auto ||
+ D->getStorageClass() == SC_None)) ||
+ isa<ParmVarDecl>(D))) {
DVar.CKind = OMPC_private;
return DVar;
}
@@ -516,6 +552,19 @@ void Sema::InitDataSharingAttributesStack() {
#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
+bool Sema::IsOpenMPCapturedVar(VarDecl *VD) {
+ assert(LangOpts.OpenMP && "OpenMP is not allowed");
+ if (DSAStack->getCurrentDirective() != OMPD_unknown) {
+ auto DVarPrivate = DSAStack->getTopDSA(VD, /*FromParent=*/false);
+ if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
+ return true;
+ DVarPrivate = DSAStack->hasDSA(VD, isOpenMPPrivate, MatchesAlways(),
+ /*FromParent=*/false);
+ return DVarPrivate.CKind != OMPC_unknown;
+ }
+ return false;
+}
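For orientation, an illustrative C++/OpenMP input for the situation this predicate reports (the call sites that consult it lie outside this hunk): a variable that has a private data-sharing attribute in the current region.

    // Inside the parallel region 'x' has an explicit private DSA, so
    // IsOpenMPCapturedVar(x) would return true there (illustrative only).
    void touch() {
      int x = 0;
    #pragma omp parallel private(x)
      {
        x = 1; // refers to the private copy
      }
    }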
+
void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
@@ -612,10 +661,9 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
VarDecl *VD;
if (!Lookup.isSingleResult()) {
- VarDeclFilterCCC Validator(*this);
- if (TypoCorrection Corrected =
- CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, Validator,
- CTK_ErrorRecovery)) {
+ if (TypoCorrection Corrected = CorrectTypo(
+ Id, LookupOrdinaryName, CurScope, nullptr,
+ llvm::make_unique<VarDeclFilterCCC>(*this), CTK_ErrorRecovery)) {
diagnoseTypo(Corrected,
PDiag(Lookup.empty()
? diag::err_undeclared_var_use_suggest
@@ -794,8 +842,10 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
}
// Check if this is a TLS variable.
- if (VD->getTLSKind()) {
- Diag(ILoc, diag::err_omp_var_thread_local) << VD;
+ if (VD->getTLSKind() != VarDecl::TLS_None ||
+ VD->getStorageClass() == SC_Register) {
+ Diag(ILoc, diag::err_omp_var_thread_local)
+ << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
@@ -814,6 +864,10 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
Vars.push_back(RefExpr);
DSAStack->addDSA(VD, DE, OMPC_threadprivate);
+ VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
+ Context, SourceRange(Loc, Loc)));
+ if (auto *ML = Context.getASTMutationListener())
+ ML->DeclarationMarkedOpenMPThreadPrivate(VD);
}
OMPThreadPrivateDecl *D = nullptr;
if (!Vars.empty()) {
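With this change, variables with register storage class are rejected by the same check as TLS variables, and the diagnostic now selects between the two cases. An illustrative input for the pre-existing TLS half of the check; the register case (a GNU global register variable) is reported through the same err_omp_var_thread_local diagnostic:

    // C++ with OpenMP: a thread_local variable may not be listed in a
    // threadprivate directive.
    thread_local int Counter;
    #pragma omp threadprivate(Counter) // error: err_omp_var_thread_local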
@@ -918,7 +972,8 @@ public:
DVar = Stack->hasInnermostDSA(VD, MatchesAnyClause(OMPC_reduction),
[](OpenMPDirectiveKind K) -> bool {
return isOpenMPParallelDirective(K) ||
- isOpenMPWorksharingDirective(K);
+ isOpenMPWorksharingDirective(K) ||
+ isOpenMPTeamsDirective(K);
},
false);
if (DKind == OMPD_task && DVar.CKind == OMPC_reduction) {
@@ -993,6 +1048,14 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
+ case OMPD_for_simd: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
case OMPD_sections: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
@@ -1045,6 +1108,18 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
+ case OMPD_parallel_for_simd: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
+ QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty);
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
case OMPD_parallel_sections: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
@@ -1061,7 +1136,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
- case OMPD_taskyield: {
+ case OMPD_ordered: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
@@ -1069,7 +1144,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
- case OMPD_barrier: {
+ case OMPD_atomic: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
@@ -1077,7 +1152,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
- case OMPD_taskwait: {
+ case OMPD_target: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
@@ -1085,8 +1160,12 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
- case OMPD_flush: {
+ case OMPD_teams: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
+ QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty);
Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
@@ -1094,6 +1173,10 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
break;
}
case OMPD_threadprivate:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_flush:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
@@ -1110,6 +1193,7 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// +------------------+-----------------+------------------------------------+
// | parallel | parallel | * |
// | parallel | for | * |
+ // | parallel | for simd | * |
// | parallel | master | * |
// | parallel | critical | * |
// | parallel | simd | * |
@@ -1117,15 +1201,21 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | parallel | section | + |
// | parallel | single | * |
// | parallel | parallel for | * |
+ // | parallel |parallel for simd| * |
// | parallel |parallel sections| * |
// | parallel | task | * |
// | parallel | taskyield | * |
// | parallel | barrier | * |
// | parallel | taskwait | * |
// | parallel | flush | * |
+ // | parallel | ordered | + |
+ // | parallel | atomic | * |
+ // | parallel | target | * |
+ // | parallel | teams | + |
// +------------------+-----------------+------------------------------------+
// | for | parallel | * |
// | for | for | + |
+ // | for | for simd | + |
// | for | master | + |
// | for | critical | * |
// | for | simd | * |
@@ -1133,15 +1223,21 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | for | section | + |
// | for | single | + |
// | for | parallel for | * |
+ // | for |parallel for simd| * |
// | for |parallel sections| * |
// | for | task | * |
// | for | taskyield | * |
// | for | barrier | + |
// | for | taskwait | * |
// | for | flush | * |
+ // | for | ordered | * (if construct is ordered) |
+ // | for | atomic | * |
+ // | for | target | * |
+ // | for | teams | + |
// +------------------+-----------------+------------------------------------+
// | master | parallel | * |
// | master | for | + |
+ // | master | for simd | + |
// | master | master | * |
// | master | critical | * |
// | master | simd | * |
@@ -1149,30 +1245,42 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | master | section | + |
// | master | single | + |
// | master | parallel for | * |
+ // | master |parallel for simd| * |
// | master |parallel sections| * |
// | master | task | * |
// | master | taskyield | * |
// | master | barrier | + |
// | master | taskwait | * |
// | master | flush | * |
+ // | master | ordered | + |
+ // | master | atomic | * |
+ // | master | target | * |
+ // | master | teams | + |
// +------------------+-----------------+------------------------------------+
// | critical | parallel | * |
// | critical | for | + |
+ // | critical | for simd | + |
// | critical | master | * |
- // | critical | critical | * (should have dirrerent names) |
+ // | critical | critical | * (should have different names) |
// | critical | simd | * |
// | critical | sections | + |
// | critical | section | + |
// | critical | single | + |
// | critical | parallel for | * |
+ // | critical |parallel for simd| * |
// | critical |parallel sections| * |
// | critical | task | * |
// | critical | taskyield | * |
// | critical | barrier | + |
// | critical | taskwait | * |
+ // | critical | ordered | + |
+ // | critical | atomic | * |
+ // | critical | target | * |
+ // | critical | teams | + |
// +------------------+-----------------+------------------------------------+
// | simd | parallel | |
// | simd | for | |
+ // | simd | for simd | |
// | simd | master | |
// | simd | critical | |
// | simd | simd | |
@@ -1180,15 +1288,65 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | simd | section | |
// | simd | single | |
// | simd | parallel for | |
+ // | simd |parallel for simd| |
// | simd |parallel sections| |
// | simd | task | |
// | simd | taskyield | |
// | simd | barrier | |
// | simd | taskwait | |
// | simd | flush | |
+ // | simd | ordered | |
+ // | simd | atomic | |
+ // | simd | target | |
+ // | simd | teams | |
+ // +------------------+-----------------+------------------------------------+
+ // | for simd | parallel | |
+ // | for simd | for | |
+ // | for simd | for simd | |
+ // | for simd | master | |
+ // | for simd | critical | |
+ // | for simd | simd | |
+ // | for simd | sections | |
+ // | for simd | section | |
+ // | for simd | single | |
+ // | for simd | parallel for | |
+ // | for simd |parallel for simd| |
+ // | for simd |parallel sections| |
+ // | for simd | task | |
+ // | for simd | taskyield | |
+ // | for simd | barrier | |
+ // | for simd | taskwait | |
+ // | for simd | flush | |
+ // | for simd | ordered | |
+ // | for simd | atomic | |
+ // | for simd | target | |
+ // | for simd | teams | |
+ // +------------------+-----------------+------------------------------------+
+ // | parallel for simd| parallel | |
+ // | parallel for simd| for | |
+ // | parallel for simd| for simd | |
+ // | parallel for simd| master | |
+ // | parallel for simd| critical | |
+ // | parallel for simd| simd | |
+ // | parallel for simd| sections | |
+ // | parallel for simd| section | |
+ // | parallel for simd| single | |
+ // | parallel for simd| parallel for | |
+ // | parallel for simd|parallel for simd| |
+ // | parallel for simd|parallel sections| |
+ // | parallel for simd| task | |
+ // | parallel for simd| taskyield | |
+ // | parallel for simd| barrier | |
+ // | parallel for simd| taskwait | |
+ // | parallel for simd| flush | |
+ // | parallel for simd| ordered | |
+ // | parallel for simd| atomic | |
+ // | parallel for simd| target | |
+ // | parallel for simd| teams | |
// +------------------+-----------------+------------------------------------+
// | sections | parallel | * |
// | sections | for | + |
+ // | sections | for simd | + |
// | sections | master | + |
// | sections | critical | * |
// | sections | simd | * |
@@ -1196,15 +1354,21 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | sections | section | * |
// | sections | single | + |
// | sections | parallel for | * |
+ // | sections |parallel for simd| * |
// | sections |parallel sections| * |
// | sections | task | * |
// | sections | taskyield | * |
// | sections | barrier | + |
// | sections | taskwait | * |
// | sections | flush | * |
+ // | sections | ordered | + |
+ // | sections | atomic | * |
+ // | sections | target | * |
+ // | sections | teams | + |
// +------------------+-----------------+------------------------------------+
// | section | parallel | * |
// | section | for | + |
+ // | section | for simd | + |
// | section | master | + |
// | section | critical | * |
// | section | simd | * |
@@ -1212,15 +1376,21 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | section | section | + |
// | section | single | + |
// | section | parallel for | * |
+ // | section |parallel for simd| * |
// | section |parallel sections| * |
// | section | task | * |
// | section | taskyield | * |
// | section | barrier | + |
// | section | taskwait | * |
// | section | flush | * |
+ // | section | ordered | + |
+ // | section | atomic | * |
+ // | section | target | * |
+ // | section | teams | + |
// +------------------+-----------------+------------------------------------+
// | single | parallel | * |
// | single | for | + |
+ // | single | for simd | + |
// | single | master | + |
// | single | critical | * |
// | single | simd | * |
@@ -1228,15 +1398,21 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | single | section | + |
// | single | single | + |
// | single | parallel for | * |
+ // | single |parallel for simd| * |
// | single |parallel sections| * |
// | single | task | * |
// | single | taskyield | * |
// | single | barrier | + |
// | single | taskwait | * |
// | single | flush | * |
+ // | single | ordered | + |
+ // | single | atomic | * |
+ // | single | target | * |
+ // | single | teams | + |
// +------------------+-----------------+------------------------------------+
// | parallel for | parallel | * |
// | parallel for | for | + |
+ // | parallel for | for simd | + |
// | parallel for | master | + |
// | parallel for | critical | * |
// | parallel for | simd | * |
@@ -1244,15 +1420,21 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | parallel for | section | + |
// | parallel for | single | + |
// | parallel for | parallel for | * |
+ // | parallel for |parallel for simd| * |
// | parallel for |parallel sections| * |
// | parallel for | task | * |
// | parallel for | taskyield | * |
// | parallel for | barrier | + |
// | parallel for | taskwait | * |
// | parallel for | flush | * |
+ // | parallel for | ordered | * (if construct is ordered) |
+ // | parallel for | atomic | * |
+ // | parallel for | target | * |
+ // | parallel for | teams | + |
// +------------------+-----------------+------------------------------------+
// | parallel sections| parallel | * |
// | parallel sections| for | + |
+ // | parallel sections| for simd | + |
// | parallel sections| master | + |
// | parallel sections| critical | + |
// | parallel sections| simd | * |
@@ -1260,15 +1442,21 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | parallel sections| section | * |
// | parallel sections| single | + |
// | parallel sections| parallel for | * |
+ // | parallel sections|parallel for simd| * |
// | parallel sections|parallel sections| * |
// | parallel sections| task | * |
// | parallel sections| taskyield | * |
// | parallel sections| barrier | + |
// | parallel sections| taskwait | * |
// | parallel sections| flush | * |
+ // | parallel sections| ordered | + |
+ // | parallel sections| atomic | * |
+ // | parallel sections| target | * |
+ // | parallel sections| teams | + |
// +------------------+-----------------+------------------------------------+
// | task | parallel | * |
// | task | for | + |
+ // | task | for simd | + |
// | task | master | + |
// | task | critical | * |
// | task | simd | * |
@@ -1276,24 +1464,128 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | task | section | + |
// | task | single | + |
// | task | parallel for | * |
+ // | task |parallel for simd| * |
// | task |parallel sections| * |
// | task | task | * |
// | task | taskyield | * |
// | task | barrier | + |
// | task | taskwait | * |
// | task | flush | * |
+ // | task | ordered | + |
+ // | task | atomic | * |
+ // | task | target | * |
+ // | task | teams | + |
+ // +------------------+-----------------+------------------------------------+
+ // | ordered | parallel | * |
+ // | ordered | for | + |
+ // | ordered | for simd | + |
+ // | ordered | master | * |
+ // | ordered | critical | * |
+ // | ordered | simd | * |
+ // | ordered | sections | + |
+ // | ordered | section | + |
+ // | ordered | single | + |
+ // | ordered | parallel for | * |
+ // | ordered |parallel for simd| * |
+ // | ordered |parallel sections| * |
+ // | ordered | task | * |
+ // | ordered | taskyield | * |
+ // | ordered | barrier | + |
+ // | ordered | taskwait | * |
+ // | ordered | flush | * |
+ // | ordered | ordered | + |
+ // | ordered | atomic | * |
+ // | ordered | target | * |
+ // | ordered | teams | + |
+ // +------------------+-----------------+------------------------------------+
+ // | atomic | parallel | |
+ // | atomic | for | |
+ // | atomic | for simd | |
+ // | atomic | master | |
+ // | atomic | critical | |
+ // | atomic | simd | |
+ // | atomic | sections | |
+ // | atomic | section | |
+ // | atomic | single | |
+ // | atomic | parallel for | |
+ // | atomic |parallel for simd| |
+ // | atomic |parallel sections| |
+ // | atomic | task | |
+ // | atomic | taskyield | |
+ // | atomic | barrier | |
+ // | atomic | taskwait | |
+ // | atomic | flush | |
+ // | atomic | ordered | |
+ // | atomic | atomic | |
+ // | atomic | target | |
+ // | atomic | teams | |
+ // +------------------+-----------------+------------------------------------+
+ // | target | parallel | * |
+ // | target | for | * |
+ // | target | for simd | * |
+ // | target | master | * |
+ // | target | critical | * |
+ // | target | simd | * |
+ // | target | sections | * |
+ // | target | section | * |
+ // | target | single | * |
+ // | target | parallel for | * |
+ // | target |parallel for simd| * |
+ // | target |parallel sections| * |
+ // | target | task | * |
+ // | target | taskyield | * |
+ // | target | barrier | * |
+ // | target | taskwait | * |
+ // | target | flush | * |
+ // | target | ordered | * |
+ // | target | atomic | * |
+ // | target | target | * |
+ // | target | teams | * |
+ // +------------------+-----------------+------------------------------------+
+ // | teams | parallel | * |
+ // | teams | for | + |
+ // | teams | for simd | + |
+ // | teams | master | + |
+ // | teams | critical | + |
+ // | teams | simd | + |
+ // | teams | sections | + |
+ // | teams | section | + |
+ // | teams | single | + |
+ // | teams | parallel for | * |
+ // | teams |parallel for simd| * |
+ // | teams |parallel sections| * |
+ // | teams | task | + |
+ // | teams | taskyield | + |
+ // | teams | barrier | + |
+ // | teams | taskwait | + |
+ // | teams | flush | + |
+ // | teams | ordered | + |
+ // | teams | atomic | + |
+ // | teams | target | + |
+ // | teams | teams | + |
// +------------------+-----------------+------------------------------------+
if (Stack->getCurScope()) {
auto ParentRegion = Stack->getParentDirective();
bool NestingProhibited = false;
bool CloseNesting = true;
- bool ShouldBeInParallelRegion = false;
+ enum {
+ NoRecommend,
+ ShouldBeInParallelRegion,
+ ShouldBeInOrderedRegion,
+ ShouldBeInTargetRegion
+ } Recommend = NoRecommend;
if (isOpenMPSimdDirective(ParentRegion)) {
// OpenMP [2.16, Nesting of Regions]
// OpenMP constructs may not be nested inside a simd region.
SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_simd);
return true;
}
+ if (ParentRegion == OMPD_atomic) {
+ // OpenMP [2.16, Nesting of Regions]
+ // OpenMP constructs may not be nested inside an atomic region.
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
+ return true;
+ }
if (CurrentRegion == OMPD_section) {
// OpenMP [2.7.2, sections Construct, Restrictions]
// Orphaned section directives are prohibited. That is, the section
@@ -1308,10 +1600,14 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
}
return false;
}
+ // Allow some constructs to be orphaned (they could be used in functions
+ // called from OpenMP regions with the required preconditions).
+ if (ParentRegion == OMPD_unknown)
+ return false;
if (CurrentRegion == OMPD_master) {
// OpenMP [2.16, Nesting of Regions]
// A master region may not be closely nested inside a worksharing,
- // atomic (TODO), or explicit task region.
+ // atomic, or explicit task region.
NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
ParentRegion == OMPD_task;
} else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
@@ -1346,30 +1642,52 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
} else if (CurrentRegion == OMPD_barrier) {
// OpenMP [2.16, Nesting of Regions]
// A barrier region may not be closely nested inside a worksharing,
- // explicit task, critical, ordered(TODO), atomic(TODO), or master
- // region.
- NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
- ParentRegion == OMPD_task ||
- ParentRegion == OMPD_master ||
- ParentRegion == OMPD_critical;
+ // explicit task, critical, ordered, atomic, or master region.
+ NestingProhibited =
+ isOpenMPWorksharingDirective(ParentRegion) ||
+ ParentRegion == OMPD_task || ParentRegion == OMPD_master ||
+ ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
} else if (isOpenMPWorksharingDirective(CurrentRegion) &&
- !isOpenMPParallelDirective(CurrentRegion) &&
- !isOpenMPSimdDirective(CurrentRegion)) {
+ !isOpenMPParallelDirective(CurrentRegion)) {
// OpenMP [2.16, Nesting of Regions]
// A worksharing region may not be closely nested inside a worksharing,
// explicit task, critical, ordered, atomic, or master region.
- // TODO
- NestingProhibited = (isOpenMPWorksharingDirective(ParentRegion) &&
- !isOpenMPSimdDirective(ParentRegion)) ||
+ NestingProhibited =
+ isOpenMPWorksharingDirective(ParentRegion) ||
+ ParentRegion == OMPD_task || ParentRegion == OMPD_master ||
+ ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
+ Recommend = ShouldBeInParallelRegion;
+ } else if (CurrentRegion == OMPD_ordered) {
+ // OpenMP [2.16, Nesting of Regions]
+ // An ordered region may not be closely nested inside a critical,
+ // atomic, or explicit task region.
+ // An ordered region must be closely nested inside a loop region (or
+ // parallel loop region) with an ordered clause.
+ NestingProhibited = ParentRegion == OMPD_critical ||
ParentRegion == OMPD_task ||
- ParentRegion == OMPD_master ||
- ParentRegion == OMPD_critical;
- ShouldBeInParallelRegion = true;
+ !Stack->isParentOrderedRegion();
+ Recommend = ShouldBeInOrderedRegion;
+ } else if (isOpenMPTeamsDirective(CurrentRegion)) {
+ // OpenMP [2.16, Nesting of Regions]
+ // If specified, a teams construct must be contained within a target
+ // construct.
+ NestingProhibited = ParentRegion != OMPD_target;
+ Recommend = ShouldBeInTargetRegion;
+ Stack->setParentTeamsRegionLoc(Stack->getConstructLoc());
+ }
+ if (!NestingProhibited && isOpenMPTeamsDirective(ParentRegion)) {
+ // OpenMP [2.16, Nesting of Regions]
+ // distribute, parallel, parallel sections, parallel workshare, and the
+ // parallel loop and parallel loop SIMD constructs are the only OpenMP
+ // constructs that can be closely nested in the teams region.
+ // TODO: add distribute directive.
+ NestingProhibited = !isOpenMPParallelDirective(CurrentRegion);
+ Recommend = ShouldBeInParallelRegion;
}
if (NestingProhibited) {
SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
- << CloseNesting << getOpenMPDirectiveName(ParentRegion)
- << ShouldBeInParallelRegion << getOpenMPDirectiveName(CurrentRegion);
+ << CloseNesting << getOpenMPDirectiveName(ParentRegion) << Recommend
+ << getOpenMPDirectiveName(CurrentRegion);
return true;
}
}
@@ -1426,6 +1744,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(OpenMPDirectiveKind Kind,
Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
VarsWithInheritedDSA);
break;
+ case OMPD_for_simd:
+ Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
+ break;
case OMPD_sections:
Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
@@ -1453,6 +1775,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(OpenMPDirectiveKind Kind,
Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc, VarsWithInheritedDSA);
break;
+ case OMPD_parallel_for_simd:
+ Res = ActOnOpenMPParallelForSimdDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
case OMPD_parallel_sections:
Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
StartLoc, EndLoc);
@@ -1487,6 +1813,23 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(OpenMPDirectiveKind Kind,
"No associated statement allowed for 'omp flush' directive");
Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
break;
+ case OMPD_ordered:
+ assert(ClausesWithImplicit.empty() &&
+ "No clauses are allowed for 'omp ordered' directive");
+ Res = ActOnOpenMPOrderedDirective(AStmt, StartLoc, EndLoc);
+ break;
+ case OMPD_atomic:
+ Res = ActOnOpenMPAtomicDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ break;
+ case OMPD_teams:
+ Res =
+ ActOnOpenMPTeamsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
+ break;
+ case OMPD_target:
+ Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ break;
case OMPD_threadprivate:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
@@ -1535,10 +1878,16 @@ class OpenMPIterationSpaceChecker {
SourceLocation DefaultLoc;
/// \brief A location for diagnostics (when increment is not compatible).
SourceLocation ConditionLoc;
+ /// \brief A source location for referring to loop init later.
+ SourceRange InitSrcRange;
/// \brief A source location for referring to condition later.
SourceRange ConditionSrcRange;
+ /// \brief A source location for referring to increment later.
+ SourceRange IncrementSrcRange;
/// \brief Loop variable.
VarDecl *Var;
+ /// \brief Reference to loop variable.
+ DeclRefExpr *VarRef;
/// \brief Lower bound (initializer for the var).
Expr *LB;
/// \brief Upper bound.
@@ -1559,9 +1908,10 @@ class OpenMPIterationSpaceChecker {
public:
OpenMPIterationSpaceChecker(Sema &SemaRef, SourceLocation DefaultLoc)
: SemaRef(SemaRef), DefaultLoc(DefaultLoc), ConditionLoc(DefaultLoc),
- ConditionSrcRange(SourceRange()), Var(nullptr), LB(nullptr),
- UB(nullptr), Step(nullptr), TestIsLessOp(false), TestIsStrictOp(false),
- SubtractStep(false) {}
+ InitSrcRange(SourceRange()), ConditionSrcRange(SourceRange()),
+ IncrementSrcRange(SourceRange()), Var(nullptr), VarRef(nullptr),
+ LB(nullptr), UB(nullptr), Step(nullptr), TestIsLessOp(false),
+ TestIsStrictOp(false), SubtractStep(false) {}
/// \brief Check init-expr for canonical loop form and save loop counter
/// variable - #Var and its initialization value - #LB.
bool CheckInit(Stmt *S);
@@ -1573,6 +1923,24 @@ public:
bool CheckInc(Expr *S);
/// \brief Return the loop counter variable.
VarDecl *GetLoopVar() const { return Var; }
+ /// \brief Return the reference expression to loop counter variable.
+ DeclRefExpr *GetLoopVarRefExpr() const { return VarRef; }
+ /// \brief Source range of the loop init.
+ SourceRange GetInitSrcRange() const { return InitSrcRange; }
+ /// \brief Source range of the loop condition.
+ SourceRange GetConditionSrcRange() const { return ConditionSrcRange; }
+ /// \brief Source range of the loop increment.
+ SourceRange GetIncrementSrcRange() const { return IncrementSrcRange; }
+ /// \brief True if the step should be subtracted.
+ bool ShouldSubtractStep() const { return SubtractStep; }
+ /// \brief Build the expression to calculate the number of iterations.
+ Expr *BuildNumIterations(Scope *S, const bool LimitedType) const;
+ /// \brief Build a reference expression to the counter to be used for codegen.
+ Expr *BuildCounterVar() const;
+ /// \brief Build the initialization of the counter to be used for codegen.
+ Expr *BuildCounterInit() const;
+ /// \brief Build the step of the counter to be used for codegen.
+ Expr *BuildCounterStep() const;
/// \brief Return true if any expression is dependent.
bool Dependent() const;
@@ -1581,7 +1949,7 @@ private:
/// expression.
bool CheckIncRHS(Expr *RHS);
/// \brief Helper to set loop counter variable and its initializer.
- bool SetVarAndLB(VarDecl *NewVar, Expr *NewLB);
+ bool SetVarAndLB(VarDecl *NewVar, DeclRefExpr *NewVarRefExpr, Expr *NewLB);
/// \brief Helper to set upper bound.
bool SetUB(Expr *NewUB, bool LessOp, bool StrictOp, const SourceRange &SR,
const SourceLocation &SL);
@@ -1598,13 +1966,16 @@ bool OpenMPIterationSpaceChecker::Dependent() const {
(UB && UB->isValueDependent()) || (Step && Step->isValueDependent());
}
-bool OpenMPIterationSpaceChecker::SetVarAndLB(VarDecl *NewVar, Expr *NewLB) {
+bool OpenMPIterationSpaceChecker::SetVarAndLB(VarDecl *NewVar,
+ DeclRefExpr *NewVarRefExpr,
+ Expr *NewLB) {
// State consistency checking to ensure correct usage.
- assert(Var == nullptr && LB == nullptr && UB == nullptr && Step == nullptr &&
- !TestIsLessOp && !TestIsStrictOp);
+ assert(Var == nullptr && LB == nullptr && VarRef == nullptr &&
+ UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
if (!NewVar || !NewLB)
return true;
Var = NewVar;
+ VarRef = NewVarRefExpr;
LB = NewLB;
return false;
}
@@ -1655,10 +2026,12 @@ bool OpenMPIterationSpaceChecker::SetStep(Expr *NewStep, bool Subtract) {
bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation();
bool IsConstNeg =
IsConstant && Result.isSigned() && (Subtract != Result.isNegative());
+ bool IsConstPos =
+ IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
bool IsConstZero = IsConstant && !Result.getBoolValue();
if (UB && (IsConstZero ||
(TestIsLessOp ? (IsConstNeg || (IsUnsigned && Subtract))
- : (!IsConstNeg || (IsUnsigned && !Subtract))))) {
+ : (IsConstPos || (IsUnsigned && !Subtract))))) {
SemaRef.Diag(NewStep->getExprLoc(),
diag::err_omp_loop_incr_not_compatible)
<< Var << TestIsLessOp << NewStep->getSourceRange();
@@ -1667,6 +2040,11 @@ bool OpenMPIterationSpaceChecker::SetStep(Expr *NewStep, bool Subtract) {
<< TestIsLessOp << ConditionSrcRange;
return true;
}
+ if (TestIsLessOp == Subtract) {
+ NewStep = SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus,
+ NewStep).get();
+ Subtract = !Subtract;
+ }
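+ // E.g. an increment like 'i -= -2' in a loop tested with '<' is normalized
+ // above to an added step of 2 (Subtract cleared), so later helpers see a
+ // step whose sign is consistent with the Subtract flag.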
}
Step = NewStep;
@@ -1687,12 +2065,14 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S) {
SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init);
return true;
}
+ InitSrcRange = S->getSourceRange();
if (Expr *E = dyn_cast<Expr>(S))
S = E->IgnoreParens();
if (auto BO = dyn_cast<BinaryOperator>(S)) {
if (BO->getOpcode() == BO_Assign)
if (auto DRE = dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens()))
- return SetVarAndLB(dyn_cast<VarDecl>(DRE->getDecl()), BO->getLHS());
+ return SetVarAndLB(dyn_cast<VarDecl>(DRE->getDecl()), DRE,
+ BO->getRHS());
} else if (auto DS = dyn_cast<DeclStmt>(S)) {
if (DS->isSingleDecl()) {
if (auto Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
@@ -1702,14 +2082,15 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S) {
SemaRef.Diag(S->getLocStart(),
diag::ext_omp_loop_not_canonical_init)
<< S->getSourceRange();
- return SetVarAndLB(Var, Var->getInit());
+ return SetVarAndLB(Var, nullptr, Var->getInit());
}
}
}
} else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S))
if (CE->getOperator() == OO_Equal)
if (auto DRE = dyn_cast<DeclRefExpr>(CE->getArg(0)))
- return SetVarAndLB(dyn_cast<VarDecl>(DRE->getDecl()), CE->getArg(1));
+ return SetVarAndLB(dyn_cast<VarDecl>(DRE->getDecl()), DRE,
+ CE->getArg(1));
SemaRef.Diag(S->getLocStart(), diag::err_omp_loop_not_canonical_init)
<< S->getSourceRange();
@@ -1833,6 +2214,7 @@ bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_incr) << Var;
return true;
}
+ IncrementSrcRange = S->getSourceRange();
S = S->IgnoreParens();
if (auto UO = dyn_cast<UnaryOperator>(S)) {
if (UO->isIncrementDecrementOp() && GetInitVarDecl(UO->getSubExpr()) == Var)
@@ -1882,6 +2264,115 @@ bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
<< S->getSourceRange() << Var;
return true;
}
+
+/// \brief Build the expression to calculate the number of iterations.
+Expr *
+OpenMPIterationSpaceChecker::BuildNumIterations(Scope *S,
+ const bool LimitedType) const {
+ ExprResult Diff;
+ if (Var->getType()->isIntegerType() || Var->getType()->isPointerType() ||
+ SemaRef.getLangOpts().CPlusPlus) {
+ // Upper - Lower
+ Expr *Upper = TestIsLessOp ? UB : LB;
+ Expr *Lower = TestIsLessOp ? LB : UB;
+
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
+
+ if (!Diff.isUsable() && Var->getType()->getAsCXXRecordDecl()) {
+ // BuildBinOp already emitted an error; this one points the user to the
+ // upper and lower bounds and shows what is passed to 'operator-'.
+ SemaRef.Diag(Upper->getLocStart(), diag::err_omp_loop_diff_cxx)
+ << Upper->getSourceRange() << Lower->getSourceRange();
+ return nullptr;
+ }
+ }
+
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // Upper - Lower [- 1]
+ if (TestIsStrictOp)
+ Diff = SemaRef.BuildBinOp(
+ S, DefaultLoc, BO_Sub, Diff.get(),
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // Upper - Lower [- 1] + Step
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(),
+ Step->IgnoreImplicit());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // Parentheses (for dumping/debugging purposes only).
+ Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // (Upper - Lower [- 1] + Step) / Step
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(),
+ Step->IgnoreImplicit());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // OpenMP runtime requires 32-bit or 64-bit loop variables.
+ if (LimitedType) {
+ auto &C = SemaRef.Context;
+ QualType Type = Diff.get()->getType();
+ unsigned NewSize = (C.getTypeSize(Type) > 32) ? 64 : 32;
+ if (NewSize != C.getTypeSize(Type)) {
+ if (NewSize < C.getTypeSize(Type)) {
+ assert(NewSize == 64 && "incorrect loop var size");
+ SemaRef.Diag(DefaultLoc, diag::warn_omp_loop_64_bit_var)
+ << InitSrcRange << ConditionSrcRange;
+ }
+ QualType NewType = C.getIntTypeForBitwidth(
+ NewSize, Type->hasSignedIntegerRepresentation());
+ Diff = SemaRef.PerformImplicitConversion(Diff.get(), NewType,
+ Sema::AA_Converting, true);
+ if (!Diff.isUsable())
+ return nullptr;
+ }
+ }
+
+ return Diff.get();
+}
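+// For example, for the inner loop 'for (j = J0; j < NJ; j += 2)' used in the
+// collapse example below, this builds ((NJ - J0) - 1 + 2) / 2: the bound
+// difference, minus one because the test is strict, plus the step, divided by
+// the step.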
+
+/// \brief Build a reference expression to the counter to be used for codegen.
+Expr *OpenMPIterationSpaceChecker::BuildCounterVar() const {
+ return DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(),
+ GetIncrementSrcRange().getBegin(), Var, false,
+ DefaultLoc, Var->getType(), VK_LValue);
+}
+
+/// \brief Build the initialization of the counter to be used for codegen.
+Expr *OpenMPIterationSpaceChecker::BuildCounterInit() const { return LB; }
+
+/// \brief Build the step of the counter to be used for codegen.
+Expr *OpenMPIterationSpaceChecker::BuildCounterStep() const { return Step; }
+
+/// \brief Iteration space of a single for loop.
+struct LoopIterationSpace {
+ /// \brief This expression calculates the number of iterations in the loop.
+ /// It is always possible to calculate it before starting the loop.
+ Expr *NumIterations;
+ /// \brief The loop counter variable.
+ Expr *CounterVar;
+ /// \brief This is the initializer for the initial value of #CounterVar.
+ Expr *CounterInit;
+ /// \brief This is the step for #CounterVar, used to generate its update:
+ /// #CounterVar = #CounterInit + #CounterStep * CurrentIteration.
+ Expr *CounterStep;
+ /// \brief Should step be subtracted?
+ bool Subtract;
+ /// \brief Source range of the loop init.
+ SourceRange InitSrcRange;
+ /// \brief Source range of the loop condition.
+ SourceRange CondSrcRange;
+ /// \brief Source range of the loop increment.
+ SourceRange IncSrcRange;
+};
+
} // namespace
/// \brief Called on a for stmt to check and extract its iteration space
@@ -1890,7 +2381,8 @@ static bool CheckOpenMPIterationSpace(
OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA,
unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
Expr *NestedLoopCountExpr,
- llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA,
+ LoopIterationSpace &ResultIterSpace) {
// OpenMP [2.6, Canonical Loop Form]
// for (init-expr; test-expr; incr-expr) structured-block
auto For = dyn_cast_or_null<ForStmt>(S);
@@ -1943,8 +2435,7 @@ static bool CheckOpenMPIterationSpace(
// that is the increment of the associated for-loop.
// Exclude loop var from the list of variables with implicitly defined data
// sharing attributes.
- while (VarsWithImplicitDSA.count(Var) > 0)
- VarsWithImplicitDSA.erase(Var);
+ VarsWithImplicitDSA.erase(Var);
// OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced in
// a Construct, C/C++].
@@ -1954,25 +2445,40 @@ static bool CheckOpenMPIterationSpace(
// The loop iteration variable(s) in the associated for-loop(s) of a for or
// parallel for construct may be listed in a private or lastprivate clause.
DSAStackTy::DSAVarData DVar = DSA.getTopDSA(Var, false);
+ auto LoopVarRefExpr = ISC.GetLoopVarRefExpr();
+ // If LoopVarRefExpr is nullptr, it means the corresponding loop variable is
+ // declared in the loop and is predetermined as private.
auto PredeterminedCKind =
isOpenMPSimdDirective(DKind)
? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate)
: OMPC_private;
if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
DVar.CKind != PredeterminedCKind) ||
- (isOpenMPWorksharingDirective(DKind) && DVar.CKind != OMPC_unknown &&
- DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
+ (isOpenMPWorksharingDirective(DKind) && !isOpenMPSimdDirective(DKind) &&
+ DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_private &&
+ DVar.CKind != OMPC_lastprivate)) &&
(DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) {
SemaRef.Diag(Init->getLocStart(), diag::err_omp_loop_var_dsa)
<< getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
<< getOpenMPClauseName(PredeterminedCKind);
ReportOriginalDSA(SemaRef, &DSA, Var, DVar, true);
HasErrors = true;
- } else {
+ } else if (LoopVarRefExpr != nullptr) {
// Make the loop iteration variable private (for worksharing constructs),
// linear (for simd directives with only one associated loop) or
// lastprivate (for simd directives with several collapsed loops).
- DSA.addDSA(Var, nullptr, PredeterminedCKind);
+ // FIXME: the next check and error message must be removed once the
+ // capturing of global variables in loops is fixed.
+ if (DVar.CKind == OMPC_unknown)
+ DVar = DSA.hasDSA(Var, isOpenMPPrivate, MatchesAlways(),
+ /*FromParent=*/false);
+ if (!Var->hasLocalStorage() && DVar.CKind == OMPC_unknown) {
+ SemaRef.Diag(Init->getLocStart(), diag::err_omp_global_loop_var_dsa)
+ << getOpenMPClauseName(PredeterminedCKind)
+ << getOpenMPDirectiveName(DKind);
+ HasErrors = true;
+ } else
+ DSA.addDSA(Var, LoopVarRefExpr, PredeterminedCKind);
}
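// For example, under these rules a directive such as
//   #pragma omp for shared(i)
//   for (i = 0; i < n; ++i) ...
// is diagnosed, because the iteration variable of a worksharing loop may only
// be listed in a private or lastprivate clause.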
assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars");
@@ -1983,35 +2489,97 @@ static bool CheckOpenMPIterationSpace(
// Check incr-expr.
HasErrors |= ISC.CheckInc(For->getInc());
- if (ISC.Dependent())
+ if (ISC.Dependent() || SemaRef.CurContext->isDependentContext() || HasErrors)
return HasErrors;
- // FIXME: Build loop's iteration space representation.
+ // Build the loop's iteration space representation.
+ ResultIterSpace.NumIterations = ISC.BuildNumIterations(
+ DSA.getCurScope(), /* LimitedType */ isOpenMPWorksharingDirective(DKind));
+ ResultIterSpace.CounterVar = ISC.BuildCounterVar();
+ ResultIterSpace.CounterInit = ISC.BuildCounterInit();
+ ResultIterSpace.CounterStep = ISC.BuildCounterStep();
+ ResultIterSpace.InitSrcRange = ISC.GetInitSrcRange();
+ ResultIterSpace.CondSrcRange = ISC.GetConditionSrcRange();
+ ResultIterSpace.IncSrcRange = ISC.GetIncrementSrcRange();
+ ResultIterSpace.Subtract = ISC.ShouldSubtractStep();
+
+ HasErrors |= (ResultIterSpace.NumIterations == nullptr ||
+ ResultIterSpace.CounterVar == nullptr ||
+ ResultIterSpace.CounterInit == nullptr ||
+ ResultIterSpace.CounterStep == nullptr);
+
return HasErrors;
}
-/// \brief A helper routine to skip no-op (attributed, compound) stmts get the
-/// next nested for loop. If \a IgnoreCaptured is true, it skips captured stmt
-/// to get the first for loop.
-static Stmt *IgnoreContainerStmts(Stmt *S, bool IgnoreCaptured) {
- if (IgnoreCaptured)
- if (auto CapS = dyn_cast_or_null<CapturedStmt>(S))
- S = CapS->getCapturedStmt();
- // OpenMP [2.8.1, simd construct, Restrictions]
- // All loops associated with the construct must be perfectly nested; that is,
- // there must be no intervening code nor any OpenMP directive between any two
- // loops.
- while (true) {
- if (auto AS = dyn_cast_or_null<AttributedStmt>(S))
- S = AS->getSubStmt();
- else if (auto CS = dyn_cast_or_null<CompoundStmt>(S)) {
- if (CS->size() != 1)
- break;
- S = CS->body_back();
- } else
- break;
- }
- return S;
+/// \brief Build a variable declaration for an OpenMP loop iteration variable.
+static VarDecl *BuildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
+ StringRef Name) {
+ DeclContext *DC = SemaRef.CurContext;
+ IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
+ TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
+ VarDecl *Decl =
+ VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
+ Decl->setImplicit();
+ return Decl;
+}
+
+/// \brief Build 'VarRef = Start + Iter * Step'.
+static ExprResult BuildCounterUpdate(Sema &SemaRef, Scope *S,
+ SourceLocation Loc, ExprResult VarRef,
+ ExprResult Start, ExprResult Iter,
+ ExprResult Step, bool Subtract) {
+ // Add parentheses (for debugging purposes only).
+ Iter = SemaRef.ActOnParenExpr(Loc, Loc, Iter.get());
+ if (!VarRef.isUsable() || !Start.isUsable() || !Iter.isUsable() ||
+ !Step.isUsable())
+ return ExprError();
+
+ ExprResult Update = SemaRef.BuildBinOp(S, Loc, BO_Mul, Iter.get(),
+ Step.get()->IgnoreImplicit());
+ if (!Update.isUsable())
+ return ExprError();
+
+ // Build 'VarRef = Start + Iter * Step'.
+ Update = SemaRef.BuildBinOp(S, Loc, (Subtract ? BO_Sub : BO_Add),
+ Start.get()->IgnoreImplicit(), Update.get());
+ if (!Update.isUsable())
+ return ExprError();
+
+ Update = SemaRef.PerformImplicitConversion(
+ Update.get(), VarRef.get()->getType(), Sema::AA_Converting, true);
+ if (!Update.isUsable())
+ return ExprError();
+
+ Update = SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), Update.get());
+ return Update;
+}
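+// For example, for the loop 'for (j = J0; j < NJ; j += 2)' this produces an
+// assignment of the form '.local.j = J0 + Iter * 2', or 'J0 - Iter * 2' when
+// Subtract is set (e.g. for loops that count downwards).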
+
+/// \brief Convert integer expression \a E to make it have at least \a Bits
+/// bits.
+static ExprResult WidenIterationCount(unsigned Bits, Expr *E,
+ Sema &SemaRef) {
+ if (E == nullptr)
+ return ExprError();
+ auto &C = SemaRef.Context;
+ QualType OldType = E->getType();
+ unsigned HasBits = C.getTypeSize(OldType);
+ if (HasBits >= Bits)
+ return ExprResult(E);
+ // OK to convert to signed, because new type has more bits than old.
+ QualType NewType = C.getIntTypeForBitwidth(Bits, /* Signed */ true);
+ return SemaRef.PerformImplicitConversion(E, NewType, Sema::AA_Converting,
+ true);
+}
+
+/// \brief Check if the given expression \a E is a constant integer that fits
+/// into \a Bits bits.
+static bool FitsInto(unsigned Bits, bool Signed, Expr *E, Sema &SemaRef) {
+ if (E == nullptr)
+ return false;
+ llvm::APSInt Result;
+ if (E->isIntegerConstantExpr(Result, SemaRef.Context))
+ return Signed ? Result.isSignedIntN(Bits) : Result.isIntN(Bits);
+ return false;
}
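// For example, WidenIterationCount(32, E, SemaRef) leaves a 32- or 64-bit
// iteration count untouched and implicitly converts a narrower (e.g. short)
// count to a signed 32-bit integer, while FitsInto reports whether a constant
// count is still representable in the narrower type.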
/// \brief Called on a for stmt to check itself and nested loops (if any).
@@ -2020,7 +2588,8 @@ static Stmt *IgnoreContainerStmts(Stmt *S, bool IgnoreCaptured) {
static unsigned
CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
Stmt *AStmt, Sema &SemaRef, DSAStackTy &DSA,
- llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA,
+ OMPLoopDirective::HelperExprs &Built) {
unsigned NestedLoopCount = 1;
if (NestedLoopCountExpr) {
// Found 'collapse' clause - calculate collapse number.
@@ -2030,18 +2599,336 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
}
// This is a helper routine for loop directives (e.g., 'for', 'simd',
// 'for simd', etc.).
- Stmt *CurStmt = IgnoreContainerStmts(AStmt, true);
+ SmallVector<LoopIterationSpace, 4> IterSpaces;
+ IterSpaces.resize(NestedLoopCount);
+ Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
if (CheckOpenMPIterationSpace(DKind, CurStmt, SemaRef, DSA, Cnt,
NestedLoopCount, NestedLoopCountExpr,
- VarsWithImplicitDSA))
+ VarsWithImplicitDSA, IterSpaces[Cnt]))
return 0;
// Move on to the next nested for loop, or to the loop body.
- CurStmt = IgnoreContainerStmts(cast<ForStmt>(CurStmt)->getBody(), false);
+ // OpenMP [2.8.1, simd construct, Restrictions]
+ // All loops associated with the construct must be perfectly nested; that
+ // is, there must be no intervening code nor any OpenMP directive between
+ // any two loops.
+ CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
+ }
+
+ Built.clear(/* size */ NestedLoopCount);
+
+ if (SemaRef.CurContext->isDependentContext())
+ return NestedLoopCount;
+
+ // An example of what is generated for the following code:
+ //
+ // #pragma omp simd collapse(2)
+ // for (i = 0; i < NI; ++i)
+ // for (j = J0; j < NJ; j+=2) {
+ // <loop body>
+ // }
+ //
+ // We generate the code below.
+ // Note: the loop body may be outlined in CodeGen.
+ // Note: some counters may be C++ classes; operator- is used to find the number
+ // of iterations and operator+= to calculate the counter value.
+ // Note: decltype(NumIterations) must be an integer type (in 'omp for', only i32
+ // or i64 is currently supported).
+ //
+ // #define NumIterations (NI * ((NJ - J0 - 1 + 2) / 2))
+ // for (int[32|64]_t IV = 0; IV < NumIterations; ++IV ) {
+ // .local.i = IV / ((NJ - J0 - 1 + 2) / 2);
+ // .local.j = J0 + (IV % ((NJ - J0 - 1 + 2) / 2)) * 2;
+ // // similar updates for vars in clauses (e.g. 'linear')
+ // <loop body (using local i and j)>
+ // }
+ // i = NI; // assign final values of counters
+ // j = NJ;
+ //
+
+ // Last iteration number is (I1 * I2 * ... In) - 1, where I1, I2 ... In are
+ // the iteration counts of the collapsed for loops.
+ auto N0 = IterSpaces[0].NumIterations;
+ ExprResult LastIteration32 = WidenIterationCount(32 /* Bits */, N0, SemaRef);
+ ExprResult LastIteration64 = WidenIterationCount(64 /* Bits */, N0, SemaRef);
+
+ if (!LastIteration32.isUsable() || !LastIteration64.isUsable())
+ return NestedLoopCount;
+
+ auto &C = SemaRef.Context;
+ bool AllCountsNeedLessThan32Bits = C.getTypeSize(N0->getType()) < 32;
+
+ Scope *CurScope = DSA.getCurScope();
+ for (unsigned Cnt = 1; Cnt < NestedLoopCount; ++Cnt) {
+ auto N = IterSpaces[Cnt].NumIterations;
+ AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32;
+ if (LastIteration32.isUsable())
+ LastIteration32 = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_Mul,
+ LastIteration32.get(), N);
+ if (LastIteration64.isUsable())
+ LastIteration64 = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_Mul,
+ LastIteration64.get(), N);
+ }
+
+ // Choose either the 32-bit or 64-bit version.
+ ExprResult LastIteration = LastIteration64;
+ if (LastIteration32.isUsable() &&
+ C.getTypeSize(LastIteration32.get()->getType()) == 32 &&
+ (AllCountsNeedLessThan32Bits || NestedLoopCount == 1 ||
+ FitsInto(
+ 32 /* Bits */,
+ LastIteration32.get()->getType()->hasSignedIntegerRepresentation(),
+ LastIteration64.get(), SemaRef)))
+ LastIteration = LastIteration32;
+
+ if (!LastIteration.isUsable())
+ return 0;
+
+ // Save the number of iterations.
+ ExprResult NumIterations = LastIteration;
+ {
+ LastIteration = SemaRef.BuildBinOp(
+ CurScope, SourceLocation(), BO_Sub, LastIteration.get(),
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ if (!LastIteration.isUsable())
+ return 0;
+ }
+
+ // Calculate the last iteration number beforehand instead of doing this on
+ // each iteration. Do not do this if the number of iterations may be
+ // constant-folded.
+ llvm::APSInt Result;
+ bool IsConstant =
+ LastIteration.get()->isIntegerConstantExpr(Result, SemaRef.Context);
+ ExprResult CalcLastIteration;
+ if (!IsConstant) {
+ SourceLocation SaveLoc;
+ VarDecl *SaveVar =
+ BuildVarDecl(SemaRef, SaveLoc, LastIteration.get()->getType(),
+ ".omp.last.iteration");
+ ExprResult SaveRef = SemaRef.BuildDeclRefExpr(
+ SaveVar, LastIteration.get()->getType(), VK_LValue, SaveLoc);
+ CalcLastIteration = SemaRef.BuildBinOp(CurScope, SaveLoc, BO_Assign,
+ SaveRef.get(), LastIteration.get());
+ LastIteration = SaveRef;
+
+ // Prepare SaveRef + 1.
+ NumIterations = SemaRef.BuildBinOp(
+ CurScope, SaveLoc, BO_Add, SaveRef.get(),
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ if (!NumIterations.isUsable())
+ return 0;
+ }
+
+ SourceLocation InitLoc = IterSpaces[0].InitSrcRange.getBegin();
+
+ // Precondition tests if there is at least one iteration (LastIteration > 0).
+ ExprResult PreCond = SemaRef.BuildBinOp(
+ CurScope, InitLoc, BO_GT, LastIteration.get(),
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get());
+
+ QualType VType = LastIteration.get()->getType();
+ // Build variables passed into the runtime, necessary for worksharing directives.
+ ExprResult LB, UB, IL, ST, EUB;
+ if (isOpenMPWorksharingDirective(DKind)) {
+ // Lower bound variable, initialized with zero.
+ VarDecl *LBDecl = BuildVarDecl(SemaRef, InitLoc, VType, ".omp.lb");
+ LB = SemaRef.BuildDeclRefExpr(LBDecl, VType, VK_LValue, InitLoc);
+ SemaRef.AddInitializerToDecl(
+ LBDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
+ /*DirectInit*/ false, /*TypeMayContainAuto*/ false);
+
+ // Upper bound variable, initialized with last iteration number.
+ VarDecl *UBDecl = BuildVarDecl(SemaRef, InitLoc, VType, ".omp.ub");
+ UB = SemaRef.BuildDeclRefExpr(UBDecl, VType, VK_LValue, InitLoc);
+ SemaRef.AddInitializerToDecl(UBDecl, LastIteration.get(),
+ /*DirectInit*/ false,
+ /*TypeMayContainAuto*/ false);
+
+ // A 32-bit variable-flag where runtime returns 1 for the last iteration.
+ // This will be used to implement clause 'lastprivate'.
+ QualType Int32Ty = SemaRef.Context.getIntTypeForBitwidth(32, true);
+ VarDecl *ILDecl = BuildVarDecl(SemaRef, InitLoc, Int32Ty, ".omp.is_last");
+ IL = SemaRef.BuildDeclRefExpr(ILDecl, Int32Ty, VK_LValue, InitLoc);
+ SemaRef.AddInitializerToDecl(
+ ILDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
+ /*DirectInit*/ false, /*TypeMayContainAuto*/ false);
+
+ // Stride variable returned by runtime (we initialize it to 1 by default).
+ VarDecl *STDecl = BuildVarDecl(SemaRef, InitLoc, VType, ".omp.stride");
+ ST = SemaRef.BuildDeclRefExpr(STDecl, VType, VK_LValue, InitLoc);
+ SemaRef.AddInitializerToDecl(
+ STDecl, SemaRef.ActOnIntegerConstant(InitLoc, 1).get(),
+ /*DirectInit*/ false, /*TypeMayContainAuto*/ false);
+
+ // Build expression: UB = min(UB, LastIteration)
+ // It is necessary for CodeGen of directives with static scheduling.
+ ExprResult IsUBGreater = SemaRef.BuildBinOp(CurScope, InitLoc, BO_GT,
+ UB.get(), LastIteration.get());
+ ExprResult CondOp = SemaRef.ActOnConditionalOp(
+ InitLoc, InitLoc, IsUBGreater.get(), LastIteration.get(), UB.get());
+ EUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, UB.get(),
+ CondOp.get());
+ EUB = SemaRef.ActOnFinishFullExpr(EUB.get());
+ }
+
+ // Build the iteration variable and its initialization before loop.
+ ExprResult IV;
+ ExprResult Init;
+ {
+ VarDecl *IVDecl = BuildVarDecl(SemaRef, InitLoc, VType, ".omp.iv");
+ IV = SemaRef.BuildDeclRefExpr(IVDecl, VType, VK_LValue, InitLoc);
+ Expr *RHS = isOpenMPWorksharingDirective(DKind)
+ ? LB.get()
+ : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
+ Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
+ Init = SemaRef.ActOnFinishFullExpr(Init.get());
}
- // FIXME: Build resulting iteration space for IR generation (collapsing
- // iteration spaces when loop count > 1 ('collapse' clause)).
+ // Loop condition (IV < NumIterations) or (IV <= UB) for worksharing loops.
+ SourceLocation CondLoc;
+ ExprResult Cond =
+ isOpenMPWorksharingDirective(DKind)
+ ? SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get())
+ : SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
+ NumIterations.get());
+ // Loop condition with 1 iteration separated (IV < LastIteration)
+ ExprResult SeparatedCond = SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT,
+ IV.get(), LastIteration.get());
+
+ // Loop increment (IV = IV + 1)
+ SourceLocation IncLoc;
+ ExprResult Inc =
+ SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, IV.get(),
+ SemaRef.ActOnIntegerConstant(IncLoc, 1).get());
+ if (!Inc.isUsable())
+ return 0;
+ Inc = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, IV.get(), Inc.get());
+ Inc = SemaRef.ActOnFinishFullExpr(Inc.get());
+ if (!Inc.isUsable())
+ return 0;
+
+ // Increments for worksharing loops (LB = LB + ST; UB = UB + ST).
+ // Used for directives with static scheduling.
+ ExprResult NextLB, NextUB;
+ if (isOpenMPWorksharingDirective(DKind)) {
+ // LB + ST
+ NextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, LB.get(), ST.get());
+ if (!NextLB.isUsable())
+ return 0;
+ // LB = LB + ST
+ NextLB =
+ SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, LB.get(), NextLB.get());
+ NextLB = SemaRef.ActOnFinishFullExpr(NextLB.get());
+ if (!NextLB.isUsable())
+ return 0;
+ // UB + ST
+ NextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, UB.get(), ST.get());
+ if (!NextUB.isUsable())
+ return 0;
+ // UB = UB + ST
+ NextUB =
+ SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, UB.get(), NextUB.get());
+ NextUB = SemaRef.ActOnFinishFullExpr(NextUB.get());
+ if (!NextUB.isUsable())
+ return 0;
+ }
+
+ // Build updates and final values of the loop counters.
+ bool HasErrors = false;
+ Built.Counters.resize(NestedLoopCount);
+ Built.Updates.resize(NestedLoopCount);
+ Built.Finals.resize(NestedLoopCount);
+ {
+ ExprResult Div;
+ // Go from inner nested loop to outer.
+ for (int Cnt = NestedLoopCount - 1; Cnt >= 0; --Cnt) {
+ LoopIterationSpace &IS = IterSpaces[Cnt];
+ SourceLocation UpdLoc = IS.IncSrcRange.getBegin();
+ // Build: Iter = (IV / Div) % IS.NumIters
+ // where Div is product of previous iterations' IS.NumIters.
+ ExprResult Iter;
+ if (Div.isUsable()) {
+ Iter =
+ SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div, IV.get(), Div.get());
+ } else {
+ Iter = IV;
+ assert((Cnt == (int)NestedLoopCount - 1) &&
+ "unusable div expected on first iteration only");
+ }
+
+ if (Cnt != 0 && Iter.isUsable())
+ Iter = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Rem, Iter.get(),
+ IS.NumIterations);
+ if (!Iter.isUsable()) {
+ HasErrors = true;
+ break;
+ }
+
+ // Build update: IS.CounterVar = IS.Start + Iter * IS.Step
+ ExprResult Update =
+ BuildCounterUpdate(SemaRef, CurScope, UpdLoc, IS.CounterVar,
+ IS.CounterInit, Iter, IS.CounterStep, IS.Subtract);
+ if (!Update.isUsable()) {
+ HasErrors = true;
+ break;
+ }
+
+ // Build final: IS.CounterVar = IS.Start + IS.NumIters * IS.Step
+ ExprResult Final = BuildCounterUpdate(
+ SemaRef, CurScope, UpdLoc, IS.CounterVar, IS.CounterInit,
+ IS.NumIterations, IS.CounterStep, IS.Subtract);
+ if (!Final.isUsable()) {
+ HasErrors = true;
+ break;
+ }
+
+ // Build Div for the next iteration: Div <- Div * IS.NumIters
+ if (Cnt != 0) {
+ if (Div.isUnset())
+ Div = IS.NumIterations;
+ else
+ Div = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Div.get(),
+ IS.NumIterations);
+
+ // Add parentheses (for debugging purposes only).
+ if (Div.isUsable())
+ Div = SemaRef.ActOnParenExpr(UpdLoc, UpdLoc, Div.get());
+ if (!Div.isUsable()) {
+ HasErrors = true;
+ break;
+ }
+ }
+ if (!Update.isUsable() || !Final.isUsable()) {
+ HasErrors = true;
+ break;
+ }
+ // Save results
+ Built.Counters[Cnt] = IS.CounterVar;
+ Built.Updates[Cnt] = Update.get();
+ Built.Finals[Cnt] = Final.get();
+ }
+ }
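+ // For the collapse(2) example above this yields Iter = IV % InnerIters for
+ // the inner loop and Iter = IV / InnerIters for the outer one, where
+ // InnerIters is ((NJ - J0 - 1 + 2) / 2), matching the '.local.i' and
+ // '.local.j' updates shown earlier.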
+
+ if (HasErrors)
+ return 0;
+
+ // Save results
+ Built.IterationVarRef = IV.get();
+ Built.LastIteration = LastIteration.get();
+ Built.CalcLastIteration = CalcLastIteration.get();
+ Built.PreCond = PreCond.get();
+ Built.Cond = Cond.get();
+ Built.SeparatedCond = SeparatedCond.get();
+ Built.Init = Init.get();
+ Built.Inc = Inc.get();
+ Built.LB = LB.get();
+ Built.UB = UB.get();
+ Built.IL = IL.get();
+ Built.ST = ST.get();
+ Built.EUB = EUB.get();
+ Built.NLB = NextLB.get();
+ Built.NUB = NextUB.get();
+
return NestedLoopCount;
}
@@ -2060,32 +2947,60 @@ StmtResult Sema::ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ OMPLoopDirective::HelperExprs B;
// In the presence of a 'collapse' clause, it defines the number of nested loops.
unsigned NestedLoopCount =
CheckOpenMPLoop(OMPD_simd, GetCollapseNumberExpr(Clauses), AStmt, *this,
- *DSAStack, VarsWithImplicitDSA);
+ *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp simd loop exprs were not built");
+
getCurFunction()->setHasBranchProtectedScope();
return OMPSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
- Clauses, AStmt);
+ Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ OMPLoopDirective::HelperExprs B;
// In the presence of a 'collapse' clause, it defines the number of nested loops.
unsigned NestedLoopCount =
CheckOpenMPLoop(OMPD_for, GetCollapseNumberExpr(Clauses), AStmt, *this,
- *DSAStack, VarsWithImplicitDSA);
+ *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
getCurFunction()->setHasBranchProtectedScope();
return OMPForDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
- Clauses, AStmt);
+ Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPForSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ OMPLoopDirective::HelperExprs B;
+ // In the presence of a 'collapse' clause, it defines the number of nested loops.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_for_simd, GetCollapseNumberExpr(Clauses), AStmt,
+ *this, *DSAStack, VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for simd loop exprs were not built");
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPForSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
+ Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
@@ -2178,16 +3093,46 @@ StmtResult Sema::ActOnOpenMPParallelForDirective(
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
+ OMPLoopDirective::HelperExprs B;
// In the presence of a 'collapse' clause, it defines the number of nested loops.
unsigned NestedLoopCount =
CheckOpenMPLoop(OMPD_parallel_for, GetCollapseNumberExpr(Clauses), AStmt,
- *this, *DSAStack, VarsWithImplicitDSA);
+ *this, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp parallel for loop exprs were not built");
+
getCurFunction()->setHasBranchProtectedScope();
return OMPParallelForDirective::Create(Context, StartLoc, EndLoc,
- NestedLoopCount, Clauses, AStmt);
+ NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // In the presence of a 'collapse' clause, it defines the number of nested loops.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_parallel_for_simd, GetCollapseNumberExpr(Clauses),
+ AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPParallelForSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
StmtResult
@@ -2264,6 +3209,271 @@ StmtResult Sema::ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
return OMPFlushDirective::Create(Context, StartLoc, EndLoc, Clauses);
}
+StmtResult Sema::ActOnOpenMPOrderedDirective(Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPOrderedDirective::Create(Context, StartLoc, EndLoc, AStmt);
+}
+
+StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ auto CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ // TODO further analysis of associated statements and clauses.
+ OpenMPClauseKind AtomicKind = OMPC_unknown;
+ SourceLocation AtomicKindLoc;
+ for (auto *C : Clauses) {
+ if (C->getClauseKind() == OMPC_read || C->getClauseKind() == OMPC_write ||
+ C->getClauseKind() == OMPC_update ||
+ C->getClauseKind() == OMPC_capture) {
+ if (AtomicKind != OMPC_unknown) {
+ Diag(C->getLocStart(), diag::err_omp_atomic_several_clauses)
+ << SourceRange(C->getLocStart(), C->getLocEnd());
+ Diag(AtomicKindLoc, diag::note_omp_atomic_previous_clause)
+ << getOpenMPClauseName(AtomicKind);
+ } else {
+ AtomicKind = C->getClauseKind();
+ AtomicKindLoc = C->getLocStart();
+ }
+ }
+ }
+
+ auto Body = CS->getCapturedStmt();
+ Expr *X = nullptr;
+ Expr *V = nullptr;
+ Expr *E = nullptr;
+ // OpenMP [2.12.6, atomic Construct]
+ // In the next expressions:
+ // * x and v (as applicable) are both l-value expressions with scalar type.
+ // * During the execution of an atomic region, multiple syntactic
+ // occurrences of x must designate the same storage location.
+ // * Neither of v and expr (as applicable) may access the storage location
+ // designated by x.
+ // * Neither of x and expr (as applicable) may access the storage location
+ // designated by v.
+ // * expr is an expression with scalar type.
+ // * binop is one of +, *, -, /, &, ^, |, <<, or >>.
+ // * binop, binop=, ++, and -- are not overloaded operators.
+ // * The expression x binop expr must be numerically equivalent to x binop
+ // (expr). This requirement is satisfied if the operators in expr have
+ // precedence greater than binop, or by using parentheses around expr or
+ // subexpressions of expr.
+ // * The expression expr binop x must be numerically equivalent to (expr)
+ // binop x. This requirement is satisfied if the operators in expr have
+ // precedence equal to or greater than binop, or by using parentheses around
+ // expr or subexpressions of expr.
+ // * For forms that allow multiple occurrences of x, the number of times
+ // that x is evaluated is unspecified.
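+ // For example, under the 'read' clause the associated statement must have
+ // the form 'v = x;' and under the 'write' clause the form 'x = expr;', where
+ // x and v (as applicable) must be scalar lvalues; other forms are diagnosed
+ // below.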
+ enum {
+ NotAnExpression,
+ NotAnAssignmentOp,
+ NotAScalarType,
+ NotAnLValue,
+ NoError
+ } ErrorFound = NoError;
+ if (AtomicKind == OMPC_read) {
+ SourceLocation ErrorLoc, NoteLoc;
+ SourceRange ErrorRange, NoteRange;
+ // If clause is read:
+ // v = x;
+ if (auto AtomicBody = dyn_cast<Expr>(Body)) {
+ auto AtomicBinOp =
+ dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
+ if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
+ X = AtomicBinOp->getRHS()->IgnoreParenImpCasts();
+ V = AtomicBinOp->getLHS()->IgnoreParenImpCasts();
+ if ((X->isInstantiationDependent() || X->getType()->isScalarType()) &&
+ (V->isInstantiationDependent() || V->getType()->isScalarType())) {
+ if (!X->isLValue() || !V->isLValue()) {
+ auto NotLValueExpr = X->isLValue() ? V : X;
+ ErrorFound = NotAnLValue;
+ ErrorLoc = AtomicBinOp->getExprLoc();
+ ErrorRange = AtomicBinOp->getSourceRange();
+ NoteLoc = NotLValueExpr->getExprLoc();
+ NoteRange = NotLValueExpr->getSourceRange();
+ }
+ } else if (!X->isInstantiationDependent() ||
+ !V->isInstantiationDependent()) {
+ auto NotScalarExpr =
+ (X->isInstantiationDependent() || X->getType()->isScalarType())
+ ? V
+ : X;
+ ErrorFound = NotAScalarType;
+ ErrorLoc = AtomicBinOp->getExprLoc();
+ ErrorRange = AtomicBinOp->getSourceRange();
+ NoteLoc = NotScalarExpr->getExprLoc();
+ NoteRange = NotScalarExpr->getSourceRange();
+ }
+ } else {
+ ErrorFound = NotAnAssignmentOp;
+ ErrorLoc = AtomicBody->getExprLoc();
+ ErrorRange = AtomicBody->getSourceRange();
+ NoteLoc = AtomicBinOp ? AtomicBinOp->getOperatorLoc()
+ : AtomicBody->getExprLoc();
+ NoteRange = AtomicBinOp ? AtomicBinOp->getSourceRange()
+ : AtomicBody->getSourceRange();
+ }
+ } else {
+ ErrorFound = NotAnExpression;
+ NoteLoc = ErrorLoc = Body->getLocStart();
+ NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
+ }
+ if (ErrorFound != NoError) {
+ Diag(ErrorLoc, diag::err_omp_atomic_read_not_expression_statement)
+ << ErrorRange;
+ Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
+ << NoteRange;
+ return StmtError();
+ } else if (CurContext->isDependentContext())
+ V = X = nullptr;
+ } else if (AtomicKind == OMPC_write) {
+ SourceLocation ErrorLoc, NoteLoc;
+ SourceRange ErrorRange, NoteRange;
+ // If clause is write:
+ // x = expr;
+ if (auto AtomicBody = dyn_cast<Expr>(Body)) {
+ auto AtomicBinOp =
+ dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
+ if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
+ X = AtomicBinOp->getLHS()->IgnoreParenImpCasts();
+ E = AtomicBinOp->getRHS()->IgnoreParenImpCasts();
+ if ((X->isInstantiationDependent() || X->getType()->isScalarType()) &&
+ (E->isInstantiationDependent() || E->getType()->isScalarType())) {
+ if (!X->isLValue()) {
+ ErrorFound = NotAnLValue;
+ ErrorLoc = AtomicBinOp->getExprLoc();
+ ErrorRange = AtomicBinOp->getSourceRange();
+ NoteLoc = X->getExprLoc();
+ NoteRange = X->getSourceRange();
+ }
+ } else if (!X->isInstantiationDependent() ||
+ !E->isInstantiationDependent()) {
+ auto NotScalarExpr =
+ (X->isInstantiationDependent() || X->getType()->isScalarType())
+ ? E
+ : X;
+ ErrorFound = NotAScalarType;
+ ErrorLoc = AtomicBinOp->getExprLoc();
+ ErrorRange = AtomicBinOp->getSourceRange();
+ NoteLoc = NotScalarExpr->getExprLoc();
+ NoteRange = NotScalarExpr->getSourceRange();
+ }
+ } else {
+ ErrorFound = NotAnAssignmentOp;
+ ErrorLoc = AtomicBody->getExprLoc();
+ ErrorRange = AtomicBody->getSourceRange();
+ NoteLoc = AtomicBinOp ? AtomicBinOp->getOperatorLoc()
+ : AtomicBody->getExprLoc();
+ NoteRange = AtomicBinOp ? AtomicBinOp->getSourceRange()
+ : AtomicBody->getSourceRange();
+ }
+ } else {
+ ErrorFound = NotAnExpression;
+ NoteLoc = ErrorLoc = Body->getLocStart();
+ NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
+ }
+ if (ErrorFound != NoError) {
+ Diag(ErrorLoc, diag::err_omp_atomic_write_not_expression_statement)
+ << ErrorRange;
+ Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
+ << NoteRange;
+ return StmtError();
+ } else if (CurContext->isDependentContext())
+ E = X = nullptr;
+ } else if (AtomicKind == OMPC_update || AtomicKind == OMPC_unknown) {
+ if (!isa<Expr>(Body)) {
+ Diag(Body->getLocStart(),
+ diag::err_omp_atomic_update_not_expression_statement)
+ << (AtomicKind == OMPC_update);
+ return StmtError();
+ }
+ } else if (AtomicKind == OMPC_capture) {
+ if (isa<Expr>(Body) && !isa<BinaryOperator>(Body)) {
+ Diag(Body->getLocStart(),
+ diag::err_omp_atomic_capture_not_expression_statement);
+ return StmtError();
+ } else if (!isa<Expr>(Body) && !isa<CompoundStmt>(Body)) {
+ Diag(Body->getLocStart(),
+ diag::err_omp_atomic_capture_not_compound_statement);
+ return StmtError();
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPAtomicDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ X, V, E);
+}
+
+StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ // OpenMP [2.16, Nesting of Regions]
+ // If specified, a teams construct must be contained within a target
+ // construct. That target construct must contain no statements or directives
+ // outside of the teams construct.
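+ // For example, the following is rejected because S1 appears outside the
+ // nested teams construct:
+ //   #pragma omp target
+ //   {
+ //     S1;
+ //     #pragma omp teams
+ //     { /* ... */ }
+ //   }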
+ if (DSAStack->hasInnerTeamsRegion()) {
+ auto S = AStmt->IgnoreContainers(/*IgnoreCaptured*/ true);
+ bool OMPTeamsFound = true;
+ if (auto *CS = dyn_cast<CompoundStmt>(S)) {
+ auto I = CS->body_begin();
+ while (I != CS->body_end()) {
+ auto OED = dyn_cast<OMPExecutableDirective>(*I);
+ if (!OED || !isOpenMPTeamsDirective(OED->getDirectiveKind())) {
+ OMPTeamsFound = false;
+ break;
+ }
+ ++I;
+ }
+ assert(I != CS->body_end() && "Not found statement");
+ S = *I;
+ }
+ if (!OMPTeamsFound) {
+ Diag(StartLoc, diag::err_omp_target_contains_not_only_teams);
+ Diag(DSAStack->getInnerTeamsRegionLoc(),
+ diag::note_omp_nested_teams_construct_here);
+ Diag(S->getLocStart(), diag::note_omp_nested_statement_here)
+ << isa<OMPExecutableDirective>(S);
+ return StmtError();
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPTargetDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+}
+
+StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPTeamsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+}
+
OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -2303,6 +3513,11 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_mergeable:
case OMPC_threadprivate:
case OMPC_flush:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
case OMPC_unknown:
llvm_unreachable("Clause is not allowed.");
}
@@ -2345,7 +3560,6 @@ OMPClause *Sema::ActOnOpenMPFinalClause(Expr *Condition,
return new (Context) OMPFinalClause(ValExpr, StartLoc, LParenLoc, EndLoc);
}
-
ExprResult Sema::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc,
Expr *Op) {
if (!Op)
@@ -2396,7 +3610,6 @@ OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation EndLoc) {
Expr *ValExpr = NumThreads;
if (!NumThreads->isValueDependent() && !NumThreads->isTypeDependent() &&
- !NumThreads->isInstantiationDependent() &&
!NumThreads->containsUnexpandedParameterPack()) {
SourceLocation NumThreadsLoc = NumThreads->getLocStart();
ExprResult Val =
@@ -2437,6 +3650,11 @@ ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
<< getOpenMPClauseName(CKind) << E->getSourceRange();
return ExprError();
}
+ if (CKind == OMPC_aligned && !Result.isPowerOf2()) {
+ Diag(E->getExprLoc(), diag::warn_omp_alignment_not_power_of_two)
+ << E->getSourceRange();
+ return ExprError();
+ }
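+ // For example, 'aligned(a : 8)' is accepted here, while 'aligned(a : 6)'
+ // triggers the warning above and the alignment expression is rejected.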
return ICE;
}
@@ -2506,6 +3724,11 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_mergeable:
case OMPC_threadprivate:
case OMPC_flush:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
case OMPC_unknown:
llvm_unreachable("Clause is not allowed.");
}
@@ -2621,6 +3844,11 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_mergeable:
case OMPC_threadprivate:
case OMPC_flush:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
case OMPC_unknown:
llvm_unreachable("Clause is not allowed.");
}
@@ -2700,6 +3928,21 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_mergeable:
Res = ActOnOpenMPMergeableClause(StartLoc, EndLoc);
break;
+ case OMPC_read:
+ Res = ActOnOpenMPReadClause(StartLoc, EndLoc);
+ break;
+ case OMPC_write:
+ Res = ActOnOpenMPWriteClause(StartLoc, EndLoc);
+ break;
+ case OMPC_update:
+ Res = ActOnOpenMPUpdateClause(StartLoc, EndLoc);
+ break;
+ case OMPC_capture:
+ Res = ActOnOpenMPCaptureClause(StartLoc, EndLoc);
+ break;
+ case OMPC_seq_cst:
+ Res = ActOnOpenMPSeqCstClause(StartLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -2727,6 +3970,7 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
+ DSAStack->setOrderedRegion();
return new (Context) OMPOrderedClause(StartLoc, EndLoc);
}
@@ -2745,6 +3989,31 @@ OMPClause *Sema::ActOnOpenMPMergeableClause(SourceLocation StartLoc,
return new (Context) OMPMergeableClause(StartLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPReadClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPReadClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPWriteClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPWriteClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPUpdateClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPUpdateClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPCaptureClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPCaptureClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPSeqCstClause(StartLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc,
@@ -2798,6 +4067,11 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_untied:
case OMPC_mergeable:
case OMPC_threadprivate:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
case OMPC_unknown:
llvm_unreachable("Clause is not allowed.");
}
@@ -2809,11 +4083,13 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> PrivateCopies;
for (auto &RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP private clause.");
if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
// It will be analyzed later.
Vars.push_back(RefExpr);
+ PrivateCopies.push_back(nullptr);
continue;
}
@@ -2835,6 +4111,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
if (Type->isDependentType() || Type->isInstantiationDependentType()) {
// It will be analyzed later.
Vars.push_back(DE);
+ PrivateCopies.push_back(nullptr);
continue;
}
@@ -2860,54 +4137,8 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
// A variable of class type (or array thereof) that appears in a private
// clause requires an accessible, unambiguous default constructor for the
// class type.
- while (Type.getNonReferenceType()->isArrayType()) {
- Type = cast<ArrayType>(Type.getNonReferenceType().getTypePtr())
- ->getElementType();
- }
- CXXRecordDecl *RD = getLangOpts().CPlusPlus
- ? Type.getNonReferenceType()->getAsCXXRecordDecl()
- : nullptr;
- // FIXME This code must be replaced by actual constructing/destructing of
- // the private variable.
- if (RD) {
- CXXConstructorDecl *CD = LookupDefaultConstructor(RD);
- PartialDiagnostic PD =
- PartialDiagnostic(PartialDiagnostic::NullDiagnostic());
- if (!CD ||
- CheckConstructorAccess(ELoc, CD,
- InitializedEntity::InitializeTemporary(Type),
- CD->getAccess(), PD) == AR_inaccessible ||
- CD->isDeleted()) {
- Diag(ELoc, diag::err_omp_required_method)
- << getOpenMPClauseName(OMPC_private) << 0;
- bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
- VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
- Diag(RD->getLocation(), diag::note_previous_decl) << RD;
- continue;
- }
- MarkFunctionReferenced(ELoc, CD);
- DiagnoseUseOfDecl(CD, ELoc);
-
- CXXDestructorDecl *DD = RD->getDestructor();
- if (DD) {
- if (CheckDestructorAccess(ELoc, DD, PD) == AR_inaccessible ||
- DD->isDeleted()) {
- Diag(ELoc, diag::err_omp_required_method)
- << getOpenMPClauseName(OMPC_private) << 4;
- bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
- VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
- Diag(RD->getLocation(), diag::note_previous_decl) << RD;
- continue;
- }
- MarkFunctionReferenced(ELoc, DD);
- DiagnoseUseOfDecl(DD, ELoc);
- }
+ while (Type->isArrayType()) {
+ Type = cast<ArrayType>(Type.getTypePtr())->getElementType();
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
@@ -2925,14 +4156,59 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
continue;
}
+ // Generate a helper private variable and initialize it with the default
+ // value. The address of the original variable is replaced by the address of
+ // the new private variable in CodeGen. This new variable is not added to
+ // IdResolver, so the code in the OpenMP region uses the original variable for
+ // proper diagnostics.
+ auto VDPrivate =
+ VarDecl::Create(Context, CurContext, DE->getLocStart(),
+ DE->getExprLoc(), VD->getIdentifier(), VD->getType(),
+ VD->getTypeSourceInfo(), /*S*/ SC_Auto);
+ ActOnUninitializedDecl(VDPrivate, /*TypeMayContainAuto*/ false);
+ if (VDPrivate->isInvalidDecl())
+ continue;
+ CurContext->addDecl(VDPrivate);
+ auto VDPrivateRefExpr =
+ DeclRefExpr::Create(Context, /*QualifierLoc*/ NestedNameSpecifierLoc(),
+ /*TemplateKWLoc*/ SourceLocation(), VDPrivate,
+ /*RefersToEnclosingVariableOrCapture*/ false,
+ /*NameLoc*/ SourceLocation(), DE->getType(),
+ /*VK*/ VK_LValue);
+
DSAStack->addDSA(VD, DE, OMPC_private);
Vars.push_back(DE);
+ PrivateCopies.push_back(VDPrivateRefExpr);
}
if (Vars.empty())
return nullptr;
- return OMPPrivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
+ return OMPPrivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars,
+ PrivateCopies);
+}
+
+namespace {
+class DiagsUninitializedSeveretyRAII {
+private:
+ DiagnosticsEngine &Diags;
+ SourceLocation SavedLoc;
+ bool IsIgnored;
+
+public:
+ DiagsUninitializedSeveretyRAII(DiagnosticsEngine &Diags, SourceLocation Loc,
+ bool IsIgnored)
+ : Diags(Diags), SavedLoc(Loc), IsIgnored(IsIgnored) {
+ if (!IsIgnored) {
+ Diags.setSeverity(/*Diag*/ diag::warn_uninit_self_reference_in_init,
+ /*Map*/ diag::Severity::Ignored, Loc);
+ }
+ }
+ ~DiagsUninitializedSeveretyRAII() {
+ if (!IsIgnored)
+ Diags.popMappings(SavedLoc);
+ }
+};
}
OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
@@ -2940,6 +4216,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> PrivateCopies;
+ SmallVector<Expr *, 8> Inits;
bool IsImplicitClause =
StartLoc.isInvalid() && LParenLoc.isInvalid() && EndLoc.isInvalid();
auto ImplicitClauseLoc = DSAStack->getConstructLoc();
@@ -2949,11 +4227,13 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
// It will be analyzed later.
Vars.push_back(RefExpr);
+ PrivateCopies.push_back(nullptr);
+ Inits.push_back(nullptr);
continue;
}
- SourceLocation ELoc = IsImplicitClause ? ImplicitClauseLoc
- : RefExpr->getExprLoc();
+ SourceLocation ELoc =
+ IsImplicitClause ? ImplicitClauseLoc : RefExpr->getExprLoc();
// OpenMP [2.1, C/C++]
// A list item is a variable name.
// OpenMP [2.9.3.3, Restrictions, p.1]
@@ -2971,6 +4251,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
if (Type->isDependentType() || Type->isInstantiationDependentType()) {
// It will be analyzed later.
Vars.push_back(DE);
+ PrivateCopies.push_back(nullptr);
+ Inits.push_back(nullptr);
continue;
}
@@ -3004,65 +4286,6 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// clause requires an accessible, unambiguous copy constructor for the
// class type.
Type = Context.getBaseElementType(Type);
- CXXRecordDecl *RD = getLangOpts().CPlusPlus
- ? Type.getNonReferenceType()->getAsCXXRecordDecl()
- : nullptr;
- // FIXME This code must be replaced by actual constructing/destructing of
- // the firstprivate variable.
- if (RD) {
- CXXConstructorDecl *CD = LookupCopyingConstructor(RD, 0);
- PartialDiagnostic PD =
- PartialDiagnostic(PartialDiagnostic::NullDiagnostic());
- if (!CD ||
- CheckConstructorAccess(ELoc, CD,
- InitializedEntity::InitializeTemporary(Type),
- CD->getAccess(), PD) == AR_inaccessible ||
- CD->isDeleted()) {
- if (IsImplicitClause) {
- Diag(ImplicitClauseLoc,
- diag::err_omp_task_predetermined_firstprivate_required_method)
- << 0;
- Diag(RefExpr->getExprLoc(), diag::note_used_here);
- } else {
- Diag(ELoc, diag::err_omp_required_method)
- << getOpenMPClauseName(OMPC_firstprivate) << 1;
- }
- bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
- VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
- Diag(RD->getLocation(), diag::note_previous_decl) << RD;
- continue;
- }
- MarkFunctionReferenced(ELoc, CD);
- DiagnoseUseOfDecl(CD, ELoc);
-
- CXXDestructorDecl *DD = RD->getDestructor();
- if (DD) {
- if (CheckDestructorAccess(ELoc, DD, PD) == AR_inaccessible ||
- DD->isDeleted()) {
- if (IsImplicitClause) {
- Diag(ImplicitClauseLoc,
- diag::err_omp_task_predetermined_firstprivate_required_method)
- << 1;
- Diag(RefExpr->getExprLoc(), diag::note_used_here);
- } else {
- Diag(ELoc, diag::err_omp_required_method)
- << getOpenMPClauseName(OMPC_firstprivate) << 4;
- }
- bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
- VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
- Diag(RD->getLocation(), diag::note_previous_decl) << RD;
- continue;
- }
- MarkFunctionReferenced(ELoc, DD);
- DiagnoseUseOfDecl(DD, ELoc);
- }
- }
// If an implicit firstprivate variable found it was checked already.
if (!IsImplicitClause) {
@@ -3152,15 +4375,75 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
}
}
+ Type = Type.getUnqualifiedType();
+ auto VDPrivate = VarDecl::Create(Context, CurContext, DE->getLocStart(),
+ ELoc, VD->getIdentifier(), VD->getType(),
+ VD->getTypeSourceInfo(), /*S*/ SC_Auto);
+ // Generate helper private variable and initialize it with the value of the
+ // original variable. The address of the original variable is replaced by
+ // the address of the new private variable in the CodeGen. This new variable
+ // is not added to IdResolver, so the code in the OpenMP region uses
+ // original variable for proper diagnostics and variable capturing.
+ Expr *VDInitRefExpr = nullptr;
+ // For arrays generate initializer for single element and replace it by the
+ // original array element in CodeGen.
+ if (DE->getType()->isArrayType()) {
+ auto VDInit = VarDecl::Create(Context, CurContext, DE->getLocStart(),
+ ELoc, VD->getIdentifier(), Type,
+ VD->getTypeSourceInfo(), /*S*/ SC_Auto);
+ CurContext->addHiddenDecl(VDInit);
+ VDInitRefExpr = DeclRefExpr::Create(
+ Context, /*QualifierLoc*/ NestedNameSpecifierLoc(),
+ /*TemplateKWLoc*/ SourceLocation(), VDInit,
+ /*RefersToEnclosingVariableOrCapture*/ true, ELoc, Type,
+ /*VK*/ VK_LValue);
+ VDInit->setIsUsed();
+ auto Init = DefaultLvalueConversion(VDInitRefExpr).get();
+ InitializedEntity Entity = InitializedEntity::InitializeVariable(VDInit);
+ InitializationKind Kind = InitializationKind::CreateCopy(ELoc, ELoc);
+
+ InitializationSequence InitSeq(*this, Entity, Kind, Init);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Init);
+ if (Result.isInvalid())
+ VDPrivate->setInvalidDecl();
+ else
+ VDPrivate->setInit(Result.getAs<Expr>());
+ } else {
+ AddInitializerToDecl(
+ VDPrivate,
+ DefaultLvalueConversion(
+ DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
+ SourceLocation(), DE->getDecl(),
+ /*RefersToEnclosingVariableOrCapture=*/true,
+ DE->getExprLoc(), DE->getType(),
+ /*VK=*/VK_LValue)).get(),
+ /*DirectInit=*/false, /*TypeMayContainAuto=*/false);
+ }
+ if (VDPrivate->isInvalidDecl()) {
+ if (IsImplicitClause) {
+ Diag(DE->getExprLoc(),
+ diag::note_omp_task_predetermined_firstprivate_here);
+ }
+ continue;
+ }
+ CurContext->addDecl(VDPrivate);
+ auto VDPrivateRefExpr =
+ DeclRefExpr::Create(Context, /*QualifierLoc*/ NestedNameSpecifierLoc(),
+ /*TemplateKWLoc*/ SourceLocation(), VDPrivate,
+ /*RefersToEnclosingVariableOrCapture*/ false,
+ DE->getLocStart(), DE->getType(),
+ /*VK*/ VK_LValue);
DSAStack->addDSA(VD, DE, OMPC_firstprivate);
Vars.push_back(DE);
+ PrivateCopies.push_back(VDPrivateRefExpr);
+ Inits.push_back(VDInitRefExpr);
}
if (Vars.empty())
return nullptr;
return OMPFirstprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- Vars);
+ Vars, PrivateCopies, Inits);
}
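
Analogously, a standalone sketch of what the copy-initialized helper for firstprivate means at the source level (illustrative names, standard OpenMP assumed):

#include <cstdio>
#include <omp.h>

int main() {
  int x = 41;
  // 'firstprivate(x)' also gives each thread its own copy, but the copy is
  // copy-initialized from the original value (the InitializationSequence
  // built above) before the region starts executing.
  #pragma omp parallel firstprivate(x) num_threads(2)
  {
    x += omp_get_thread_num();   // modifies only the thread-local copy
    std::printf("per-thread x: %d\n", x);
  }
  std::printf("original x: %d\n", x);  // still 41
  return 0;
}
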
OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
@@ -4099,4 +5382,3 @@ OMPClause *Sema::ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
return OMPFlushClause::Create(Context, StartLoc, LParenLoc, EndLoc, VarList);
}
-#undef DSAStack
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 03001d89f07e..9195ee59075d 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -35,7 +35,7 @@
#include <algorithm>
#include <cstdlib>
-namespace clang {
+using namespace clang;
using namespace sema;
/// A convenience routine for creating a decayed reference to a function.
@@ -102,43 +102,9 @@ CompareDerivedToBaseConversions(Sema &S,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2);
-
-
-/// GetConversionCategory - Retrieve the implicit conversion
-/// category corresponding to the given implicit conversion kind.
-ImplicitConversionCategory
-GetConversionCategory(ImplicitConversionKind Kind) {
- static const ImplicitConversionCategory
- Category[(int)ICK_Num_Conversion_Kinds] = {
- ICC_Identity,
- ICC_Lvalue_Transformation,
- ICC_Lvalue_Transformation,
- ICC_Lvalue_Transformation,
- ICC_Identity,
- ICC_Qualification_Adjustment,
- ICC_Promotion,
- ICC_Promotion,
- ICC_Promotion,
- ICC_Conversion,
- ICC_Conversion,
- ICC_Conversion,
- ICC_Conversion,
- ICC_Conversion,
- ICC_Conversion,
- ICC_Conversion,
- ICC_Conversion,
- ICC_Conversion,
- ICC_Conversion,
- ICC_Conversion,
- ICC_Conversion,
- ICC_Conversion
- };
- return Category[(int)Kind];
-}
-
/// GetConversionRank - Retrieve the implicit conversion rank
/// corresponding to the given implicit conversion kind.
-ImplicitConversionRank GetConversionRank(ImplicitConversionKind Kind) {
+ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
static const ImplicitConversionRank
Rank[(int)ICK_Num_Conversion_Kinds] = {
ICR_Exact_Match,
@@ -171,7 +137,7 @@ ImplicitConversionRank GetConversionRank(ImplicitConversionKind Kind) {
/// GetImplicitConversionName - Return the name of this kind of
/// implicit conversion.
-const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
+static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
static const char* const Name[(int)ICK_Num_Conversion_Kinds] = {
"No conversion",
"Lvalue-to-rvalue",
@@ -195,7 +161,7 @@ const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
"Vector splat",
"Complex-real conversion",
"Block Pointer conversion",
- "Transparent Union Conversion"
+ "Transparent Union Conversion",
"Writeback conversion"
};
return Name[Kind];
@@ -568,9 +534,10 @@ namespace {
/// \brief Convert from Sema's representation of template deduction information
/// to the form used in overload-candidate information.
-DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context,
- Sema::TemplateDeductionResult TDK,
- TemplateDeductionInfo &Info) {
+DeductionFailureInfo
+clang::MakeDeductionFailureInfo(ASTContext &Context,
+ Sema::TemplateDeductionResult TDK,
+ TemplateDeductionInfo &Info) {
DeductionFailureInfo Result;
Result.Result = static_cast<unsigned>(TDK);
Result.HasDiagnostic = false;
@@ -1067,7 +1034,7 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
// is a redeclaration of OldMethod.
unsigned OldQuals = OldMethod->getTypeQualifiers();
unsigned NewQuals = NewMethod->getTypeQualifiers();
- if (!getLangOpts().CPlusPlus1y && NewMethod->isConstexpr() &&
+ if (!getLangOpts().CPlusPlus14 && NewMethod->isConstexpr() &&
!isa<CXXConstructorDecl>(NewMethod))
NewQuals |= Qualifiers::Const;
@@ -1268,11 +1235,11 @@ Sema::TryImplicitConversion(Expr *From, QualType ToType,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion) {
- return clang::TryImplicitConversion(*this, From, ToType,
- SuppressUserConversions, AllowExplicit,
- InOverloadResolution, CStyle,
- AllowObjCWritebackConversion,
- /*AllowObjCConversionOnExplicit=*/false);
+ return ::TryImplicitConversion(*this, From, ToType,
+ SuppressUserConversions, AllowExplicit,
+ InOverloadResolution, CStyle,
+ AllowObjCWritebackConversion,
+ /*AllowObjCConversionOnExplicit=*/false);
}
/// PerformImplicitConversion - Perform an implicit conversion of the
@@ -1301,13 +1268,13 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (getLangOpts().ObjC1)
CheckObjCBridgeRelatedConversions(From->getLocStart(),
ToType, From->getType(), From);
- ICS = clang::TryImplicitConversion(*this, From, ToType,
- /*SuppressUserConversions=*/false,
- AllowExplicit,
- /*InOverloadResolution=*/false,
- /*CStyle=*/false,
- AllowObjCWritebackConversion,
- /*AllowObjCConversionOnExplicit=*/false);
+ ICS = ::TryImplicitConversion(*this, From, ToType,
+ /*SuppressUserConversions=*/false,
+ AllowExplicit,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ AllowObjCWritebackConversion,
+ /*AllowObjCConversionOnExplicit=*/false);
return PerformImplicitConversion(From, ToType, ICS, Action);
}
@@ -1451,6 +1418,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// We were able to resolve the address of the overloaded function,
// so we can convert to the type of that function.
FromType = Fn->getType();
+ SCS.setFromType(FromType);
// we can sometimes resolve &foo<int> regardless of ToType, so check
// if the type matches (identity) or we are converting to bool
@@ -3656,7 +3624,7 @@ CompareStandardConversionSequences(Sema &S,
/// CompareQualificationConversions - Compares two standard conversion
/// sequences to determine whether they can be ranked based on their
/// qualification conversions (C++ 13.3.3.2p3 bullet 3).
-ImplicitConversionSequence::CompareKind
+static ImplicitConversionSequence::CompareKind
CompareQualificationConversions(Sema &S,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2) {
@@ -3769,7 +3737,7 @@ CompareQualificationConversions(Sema &S,
/// various kinds of derived-to-base conversions (C++
/// [over.ics.rank]p4b3). As part of these checks, we also look at
/// conversions between Objective-C interface types.
-ImplicitConversionSequence::CompareKind
+static ImplicitConversionSequence::CompareKind
CompareDerivedToBaseConversions(Sema &S,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2) {
@@ -4849,9 +4817,8 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
// Note that we always use the true parent context when performing
// the actual argument initialization.
- ImplicitConversionSequence ICS
- = TryObjectArgumentInitialization(*this, From->getType(), FromClassification,
- Method, Method->getParent());
+ ImplicitConversionSequence ICS = TryObjectArgumentInitialization(
+ *this, From->getType(), FromClassification, Method, Method->getParent());
if (ICS.isBad()) {
if (ICS.Bad.Kind == BadConversionSequence::bad_qualifiers) {
Qualifiers FromQs = FromRecordType.getQualifiers();
@@ -4927,41 +4894,51 @@ static bool CheckConvertedConstantConversions(Sema &S,
// conversions are fine.
switch (SCS.Second) {
case ICK_Identity:
+ case ICK_NoReturn_Adjustment:
case ICK_Integral_Promotion:
- case ICK_Integral_Conversion:
- case ICK_Zero_Event_Conversion:
+ case ICK_Integral_Conversion: // Narrowing conversions are checked elsewhere.
return true;
case ICK_Boolean_Conversion:
// Conversion from an integral or unscoped enumeration type to bool is
- // classified as ICK_Boolean_Conversion, but it's also an integral
- // conversion, so it's permitted in a converted constant expression.
+ // classified as ICK_Boolean_Conversion, but it's also arguably an integral
+ // conversion, so we allow it in a converted constant expression.
+ //
+ // FIXME: Per core issue 1407, we should not allow this, but that breaks
+ // a lot of popular code. We should at least add a warning for this
+ // (non-conforming) extension.
return SCS.getFromType()->isIntegralOrUnscopedEnumerationType() &&
SCS.getToType(2)->isBooleanType();
+ case ICK_Pointer_Conversion:
+ case ICK_Pointer_Member:
+ // C++1z: null pointer conversions and null member pointer conversions are
+ // only permitted if the source type is std::nullptr_t.
+ return SCS.getFromType()->isNullPtrType();
+
+ case ICK_Floating_Promotion:
+ case ICK_Complex_Promotion:
+ case ICK_Floating_Conversion:
+ case ICK_Complex_Conversion:
case ICK_Floating_Integral:
+ case ICK_Compatible_Conversion:
+ case ICK_Derived_To_Base:
+ case ICK_Vector_Conversion:
+ case ICK_Vector_Splat:
case ICK_Complex_Real:
+ case ICK_Block_Pointer_Conversion:
+ case ICK_TransparentUnionConversion:
+ case ICK_Writeback_Conversion:
+ case ICK_Zero_Event_Conversion:
return false;
case ICK_Lvalue_To_Rvalue:
case ICK_Array_To_Pointer:
case ICK_Function_To_Pointer:
- case ICK_NoReturn_Adjustment:
+ llvm_unreachable("found a first conversion kind in Second");
+
case ICK_Qualification:
- case ICK_Compatible_Conversion:
- case ICK_Vector_Conversion:
- case ICK_Vector_Splat:
- case ICK_Derived_To_Base:
- case ICK_Pointer_Conversion:
- case ICK_Pointer_Member:
- case ICK_Block_Pointer_Conversion:
- case ICK_Writeback_Conversion:
- case ICK_Floating_Promotion:
- case ICK_Complex_Promotion:
- case ICK_Complex_Conversion:
- case ICK_Floating_Conversion:
- case ICK_TransparentUnionConversion:
- llvm_unreachable("unexpected second conversion kind");
+ llvm_unreachable("found a third conversion kind in Second");
case ICK_Num_Conversion_Kinds:
break;
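
The conversions accepted above are the ones a converted constant expression may use, for example in non-type template arguments or case labels; a small standalone illustration (C++11 or later, names arbitrary):

template <int N> struct IntArg {};
template <bool B> struct BoolArg {};

IntArg<'x'> a;    // OK: char -> int is ICK_Integral_Conversion, allowed above
BoolArg<1> b;     // OK via the (questionable) extension noted in the FIXME:
                  // int -> bool is ICK_Boolean_Conversion
// IntArg<1.5> c; // ill-formed: ICK_Floating_Integral is rejected above
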
@@ -4973,67 +4950,71 @@ static bool CheckConvertedConstantConversions(Sema &S,
/// CheckConvertedConstantExpression - Check that the expression From is a
/// converted constant expression of type T, perform the conversion and produce
/// the converted expression, per C++11 [expr.const]p3.
-ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
- llvm::APSInt &Value,
- CCEKind CCE) {
- assert(LangOpts.CPlusPlus11 && "converted constant expression outside C++11");
- assert(T->isIntegralOrEnumerationType() && "unexpected converted const type");
-
- if (checkPlaceholderForOverload(*this, From))
+static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
+ QualType T, APValue &Value,
+ Sema::CCEKind CCE,
+ bool RequireInt) {
+ assert(S.getLangOpts().CPlusPlus11 &&
+ "converted constant expression outside C++11");
+
+ if (checkPlaceholderForOverload(S, From))
return ExprError();
- // C++11 [expr.const]p3 with proposed wording fixes:
- // A converted constant expression of type T is a core constant expression,
- // implicitly converted to a prvalue of type T, where the converted
- // expression is a literal constant expression and the implicit conversion
- // sequence contains only user-defined conversions, lvalue-to-rvalue
- // conversions, integral promotions, and integral conversions other than
- // narrowing conversions.
+ // C++1z [expr.const]p3:
+ // A converted constant expression of type T is an expression,
+ // implicitly converted to type T, where the converted
+ // expression is a constant expression and the implicit conversion
+ // sequence contains only [... list of conversions ...].
ImplicitConversionSequence ICS =
- TryImplicitConversion(From, T,
+ TryCopyInitialization(S, From, T,
/*SuppressUserConversions=*/false,
- /*AllowExplicit=*/false,
/*InOverloadResolution=*/false,
- /*CStyle=*/false,
- /*AllowObjcWritebackConversion=*/false);
+ /*AllowObjcWritebackConversion=*/false,
+ /*AllowExplicit=*/false);
StandardConversionSequence *SCS = nullptr;
switch (ICS.getKind()) {
case ImplicitConversionSequence::StandardConversion:
- if (!CheckConvertedConstantConversions(*this, ICS.Standard))
- return Diag(From->getLocStart(),
- diag::err_typecheck_converted_constant_expression_disallowed)
- << From->getType() << From->getSourceRange() << T;
SCS = &ICS.Standard;
break;
case ImplicitConversionSequence::UserDefinedConversion:
- // We are converting from class type to an integral or enumeration type, so
- // the Before sequence must be trivial.
- if (!CheckConvertedConstantConversions(*this, ICS.UserDefined.After))
- return Diag(From->getLocStart(),
- diag::err_typecheck_converted_constant_expression_disallowed)
- << From->getType() << From->getSourceRange() << T;
+ // We are converting to a non-class type, so the Before sequence
+ // must be trivial.
SCS = &ICS.UserDefined.After;
break;
case ImplicitConversionSequence::AmbiguousConversion:
case ImplicitConversionSequence::BadConversion:
- if (!DiagnoseMultipleUserDefinedConversion(From, T))
- return Diag(From->getLocStart(),
- diag::err_typecheck_converted_constant_expression)
- << From->getType() << From->getSourceRange() << T;
+ if (!S.DiagnoseMultipleUserDefinedConversion(From, T))
+ return S.Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression)
+ << From->getType() << From->getSourceRange() << T;
return ExprError();
case ImplicitConversionSequence::EllipsisConversion:
llvm_unreachable("ellipsis conversion in converted constant expression");
}
- ExprResult Result = PerformImplicitConversion(From, T, ICS, AA_Converting);
+ // Check that we would only use permitted conversions.
+ if (!CheckConvertedConstantConversions(S, *SCS)) {
+ return S.Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression_disallowed)
+ << From->getType() << From->getSourceRange() << T;
+ }
+ // [...] and where the reference binding (if any) binds directly.
+ if (SCS->ReferenceBinding && !SCS->DirectBinding) {
+ return S.Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression_indirect)
+ << From->getType() << From->getSourceRange() << T;
+ }
+
+ ExprResult Result =
+ S.PerformImplicitConversion(From, T, ICS, Sema::AA_Converting);
if (Result.isInvalid())
return Result;
// Check for a narrowing implicit conversion.
APValue PreNarrowingValue;
QualType PreNarrowingType;
- switch (SCS->getNarrowingKind(Context, Result.get(), PreNarrowingValue,
+ switch (SCS->getNarrowingKind(S.Context, Result.get(), PreNarrowingValue,
PreNarrowingType)) {
case NK_Variable_Narrowing:
// Implicit conversion to a narrower type, and the value is not a constant
@@ -5042,13 +5023,13 @@ ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
break;
case NK_Constant_Narrowing:
- Diag(From->getLocStart(), diag::ext_cce_narrowing)
+ S.Diag(From->getLocStart(), diag::ext_cce_narrowing)
<< CCE << /*Constant*/1
- << PreNarrowingValue.getAsString(Context, PreNarrowingType) << T;
+ << PreNarrowingValue.getAsString(S.Context, PreNarrowingType) << T;
break;
case NK_Type_Narrowing:
- Diag(From->getLocStart(), diag::ext_cce_narrowing)
+ S.Diag(From->getLocStart(), diag::ext_cce_narrowing)
<< CCE << /*Constant*/0 << From->getType() << T;
break;
}
@@ -5058,12 +5039,15 @@ ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
Expr::EvalResult Eval;
Eval.Diag = &Notes;
- if (!Result.get()->EvaluateAsRValue(Eval, Context) || !Eval.Val.isInt()) {
+ if ((T->isReferenceType()
+ ? !Result.get()->EvaluateAsLValue(Eval, S.Context)
+ : !Result.get()->EvaluateAsRValue(Eval, S.Context)) ||
+ (RequireInt && !Eval.Val.isInt())) {
// The expression can't be folded, so we can't keep it at this position in
// the AST.
Result = ExprError();
} else {
- Value = Eval.Val.getInt();
+ Value = Eval.Val;
if (Notes.empty()) {
// It's a constant expression.
@@ -5074,16 +5058,34 @@ ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
// It's not a constant expression. Produce an appropriate diagnostic.
if (Notes.size() == 1 &&
Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr)
- Diag(Notes[0].first, diag::err_expr_not_cce) << CCE;
+ S.Diag(Notes[0].first, diag::err_expr_not_cce) << CCE;
else {
- Diag(From->getLocStart(), diag::err_expr_not_cce)
+ S.Diag(From->getLocStart(), diag::err_expr_not_cce)
<< CCE << From->getSourceRange();
for (unsigned I = 0; I < Notes.size(); ++I)
- Diag(Notes[I].first, Notes[I].second);
+ S.Diag(Notes[I].first, Notes[I].second);
}
- return Result;
+ return ExprError();
+}
+
+ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
+ APValue &Value, CCEKind CCE) {
+ return ::CheckConvertedConstantExpression(*this, From, T, Value, CCE, false);
}
+ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
+ llvm::APSInt &Value,
+ CCEKind CCE) {
+ assert(T->isIntegralOrEnumerationType() && "unexpected converted const type");
+
+ APValue V;
+ auto R = ::CheckConvertedConstantExpression(*this, From, T, V, CCE, true);
+ if (!R.isInvalid())
+ Value = V.getInt();
+ return R;
+}
+
+
/// dropPointerConversions - If the given standard conversion sequence
/// involves any pointer conversions, remove them. This may change
/// the result type of the conversion sequence.
@@ -5364,14 +5366,14 @@ ExprResult Sema::PerformContextualImplicitConversion(
CXXConversionDecl *Conversion;
FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
if (ConvTemplate) {
- if (getLangOpts().CPlusPlus1y)
+ if (getLangOpts().CPlusPlus14)
Conversion = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
else
continue; // C++11 does not consider conversion operator templates(?).
} else
Conversion = cast<CXXConversionDecl>(D);
- assert((!ConvTemplate || getLangOpts().CPlusPlus1y) &&
+ assert((!ConvTemplate || getLangOpts().CPlusPlus14) &&
"Conversion operator templates are considered potentially "
"viable in C++1y");
@@ -5384,7 +5386,7 @@ ExprResult Sema::PerformContextualImplicitConversion(
if (!ConvTemplate)
ExplicitConversions.addDecl(I.getDecl(), I.getAccess());
} else {
- if (!ConvTemplate && getLangOpts().CPlusPlus1y) {
+ if (!ConvTemplate && getLangOpts().CPlusPlus14) {
if (ToType.isNull())
ToType = CurToType.getUnqualifiedType();
else if (HasUniqueTargetType &&
@@ -5396,7 +5398,7 @@ ExprResult Sema::PerformContextualImplicitConversion(
}
}
- if (getLangOpts().CPlusPlus1y) {
+ if (getLangOpts().CPlusPlus14) {
// C++1y [conv]p6:
// ... An expression e of class type E appearing in such a context
// is said to be contextually implicitly converted to a specified
@@ -5584,6 +5586,15 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
// Overload resolution is always an unevaluated context.
EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ // Add this candidate
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size());
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = Function;
+ Candidate.Viable = true;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = Args.size();
+
if (Constructor) {
// C++ [class.copy]p3:
// A member function template is never instantiated to perform the copy
@@ -5592,19 +5603,13 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
if (Args.size() == 1 &&
Constructor->isSpecializationCopyingObject() &&
(Context.hasSameUnqualifiedType(ClassType, Args[0]->getType()) ||
- IsDerivedFrom(Args[0]->getType(), ClassType)))
+ IsDerivedFrom(Args[0]->getType(), ClassType))) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_illegal_constructor;
return;
+ }
}
- // Add this candidate
- OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size());
- Candidate.FoundDecl = FoundDecl;
- Candidate.Function = Function;
- Candidate.Viable = true;
- Candidate.IsSurrogate = false;
- Candidate.IgnoreObjectArgument = false;
- Candidate.ExplicitCallArguments = Args.size();
-
unsigned NumParams = Proto->getNumParams();
// (C++ 13.3.2p2): A candidate function having fewer than m
@@ -5633,7 +5638,11 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
// (CUDA B.1): Check for invalid calls between targets.
if (getLangOpts().CUDA)
if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
- if (CheckCUDATarget(Caller, Function)) {
+ // Skip the check for callers that are implicit members, because in this
+ // case we may not yet know what the member's target is; the target is
+ // inferred for the member automatically, based on the bases and fields of
+ // the class.
+ if (!Caller->isImplicit() && CheckCUDATarget(Caller, Function)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_target;
return;
@@ -5676,6 +5685,93 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
}
}
+ObjCMethodDecl *Sema::SelectBestMethod(Selector Sel, MultiExprArg Args,
+ bool IsInstance) {
+ SmallVector<ObjCMethodDecl*, 4> Methods;
+ if (!CollectMultipleMethodsInGlobalPool(Sel, Methods, IsInstance))
+ return nullptr;
+
+ for (unsigned b = 0, e = Methods.size(); b < e; b++) {
+ bool Match = true;
+ ObjCMethodDecl *Method = Methods[b];
+ unsigned NumNamedArgs = Sel.getNumArgs();
+    // Method might have more arguments than the selector indicates. This is
+    // due to the addition of C-style arguments to the method.

+ if (Method->param_size() > NumNamedArgs)
+ NumNamedArgs = Method->param_size();
+ if (Args.size() < NumNamedArgs)
+ continue;
+
+ for (unsigned i = 0; i < NumNamedArgs; i++) {
+ // We can't do any type-checking on a type-dependent argument.
+ if (Args[i]->isTypeDependent()) {
+ Match = false;
+ break;
+ }
+
+ ParmVarDecl *param = Method->parameters()[i];
+ Expr *argExpr = Args[i];
+ assert(argExpr && "SelectBestMethod(): missing expression");
+
+ // Strip the unbridged-cast placeholder expression off unless it's
+ // a consumed argument.
+ if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) &&
+ !param->hasAttr<CFConsumedAttr>())
+ argExpr = stripARCUnbridgedCast(argExpr);
+
+ // If the parameter is __unknown_anytype, move on to the next method.
+ if (param->getType() == Context.UnknownAnyTy) {
+ Match = false;
+ break;
+ }
+
+ ImplicitConversionSequence ConversionState
+ = TryCopyInitialization(*this, argExpr, param->getType(),
+ /*SuppressUserConversions*/false,
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ getLangOpts().ObjCAutoRefCount,
+ /*AllowExplicit*/false);
+ if (ConversionState.isBad()) {
+ Match = false;
+ break;
+ }
+ }
+ // Promote additional arguments to variadic methods.
+ if (Match && Method->isVariadic()) {
+ for (unsigned i = NumNamedArgs, e = Args.size(); i < e; ++i) {
+ if (Args[i]->isTypeDependent()) {
+ Match = false;
+ break;
+ }
+ ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
+ nullptr);
+ if (Arg.isInvalid()) {
+ Match = false;
+ break;
+ }
+ }
+ } else {
+ // Check for extra arguments to non-variadic methods.
+ if (Args.size() != NumNamedArgs)
+ Match = false;
+ else if (Match && NumNamedArgs == 0 && Methods.size() > 1) {
+ // Special case when selectors have no argument. In this case, select
+ // one with the most general result type of 'id'.
+ for (unsigned b = 0, e = Methods.size(); b < e; b++) {
+ QualType ReturnT = Methods[b]->getReturnType();
+ if (ReturnT->isObjCIdType())
+ return Methods[b];
+ }
+ }
+ }
+
+ if (Match)
+ return Method;
+ }
+ return nullptr;
+}
+
static bool IsNotEnableIfAttr(Attr *A) { return !isa<EnableIfAttr>(A); }
EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
@@ -5696,6 +5792,7 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
// Convert the arguments.
SmallVector<Expr *, 16> ConvertedArgs;
bool InitializationFailed = false;
+ bool ContainsValueDependentExpr = false;
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
if (i == 0 && !MissingImplicitThis && isa<CXXMethodDecl>(Function) &&
!cast<CXXMethodDecl>(Function)->isStatic() &&
@@ -5708,6 +5805,7 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
InitializationFailed = true;
break;
}
+ ContainsValueDependentExpr |= R.get()->isValueDependent();
ConvertedArgs.push_back(R.get());
} else {
ExprResult R =
@@ -5720,6 +5818,7 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
InitializationFailed = true;
break;
}
+ ContainsValueDependentExpr |= R.get()->isValueDependent();
ConvertedArgs.push_back(R.get());
}
}
@@ -5730,11 +5829,16 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
for (AttrVec::iterator I = Attrs.begin(); I != E; ++I) {
APValue Result;
EnableIfAttr *EIA = cast<EnableIfAttr>(*I);
+ if (EIA->getCond()->isValueDependent()) {
+ // Don't even try now, we'll examine it after instantiation.
+ continue;
+ }
+
if (!EIA->getCond()->EvaluateWithSubstitution(
- Result, Context, Function,
- ArrayRef<const Expr*>(ConvertedArgs.data(),
- ConvertedArgs.size())) ||
- !Result.isInt() || !Result.getInt().getBoolValue()) {
+ Result, Context, Function, llvm::makeArrayRef(ConvertedArgs))) {
+ if (!ContainsValueDependentExpr)
+ return EIA;
+ } else if (!Result.isInt() || !Result.getInt().getBoolValue()) {
return EIA;
}
}
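
For reference, the kind of code this routine decides on is clang's enable_if extension; a compile-only sketch with illustrative names:

// Each candidate's enable_if condition is evaluated with the converted
// argument values; a value-dependent condition is now deferred until
// instantiation instead of disabling the candidate immediately.
void require_even(int n)
    __attribute__((enable_if(n % 2 == 0, "n must be even")));

void caller() {
  require_even(4);    // viable: the condition folds to true
  // require_even(3); // error: the only candidate is disabled by enable_if
}
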
@@ -5891,6 +5995,15 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
}
}
+ // (CUDA B.1): Check for invalid calls between targets.
+ if (getLangOpts().CUDA)
+ if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
+ if (CheckCUDATarget(Caller, Method)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_target;
+ return;
+ }
+
// Determine the implicit conversion sequences for each of the
// arguments.
for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
@@ -6087,7 +6200,7 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
// If the conversion function has an undeduced return type, trigger its
// deduction now.
- if (getLangOpts().CPlusPlus1y && ConvType->isUndeducedType()) {
+ if (getLangOpts().CPlusPlus14 && ConvType->isUndeducedType()) {
if (DeduceReturnType(Conversion, From->getExprLoc()))
return;
ConvType = Conversion->getConversionType().getNonReferenceType();
@@ -6226,7 +6339,7 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
"Can only end up with a standard conversion sequence or failure");
}
- if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, ArrayRef<Expr*>())) {
+ if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
@@ -6379,7 +6492,7 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
}
}
- if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, ArrayRef<Expr*>())) {
+ if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
@@ -6610,7 +6723,7 @@ BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
const Qualifiers &VisibleQuals) {
// Insert this type.
- if (!PointerTypes.insert(Ty))
+ if (!PointerTypes.insert(Ty).second)
return false;
QualType PointeeTy;
@@ -6678,7 +6791,7 @@ bool
BuiltinCandidateTypeSet::AddMemberPointerWithMoreQualifiedTypeVariants(
QualType Ty) {
// Insert this type.
- if (!MemberPointerTypes.insert(Ty))
+ if (!MemberPointerTypes.insert(Ty).second)
return false;
const MemberPointerType *PointerTy = Ty->getAs<MemberPointerType>();
@@ -7248,7 +7361,7 @@ public:
MemPtr != MemPtrEnd;
++MemPtr) {
// Don't add the same builtin candidate twice.
- if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)))
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
continue;
QualType ParamTypes[2] = { *MemPtr, *MemPtr };
@@ -7323,7 +7436,7 @@ public:
PtrEnd = CandidateTypes[ArgIdx].pointer_end();
Ptr != PtrEnd; ++Ptr) {
// Don't add the same builtin candidate twice.
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
continue;
QualType ParamTypes[2] = { *Ptr, *Ptr };
@@ -7337,7 +7450,7 @@ public:
// Don't add the same builtin candidate twice, or if a user defined
// candidate exists.
- if (!AddedTypes.insert(CanonType) ||
+ if (!AddedTypes.insert(CanonType).second ||
UserDefinedBinaryOperators.count(std::make_pair(CanonType,
CanonType)))
continue;
@@ -7348,7 +7461,7 @@ public:
if (CandidateTypes[ArgIdx].hasNullPtrType()) {
CanQualType NullPtrTy = S.Context.getCanonicalType(S.Context.NullPtrTy);
- if (AddedTypes.insert(NullPtrTy) &&
+ if (AddedTypes.insert(NullPtrTy).second &&
!UserDefinedBinaryOperators.count(std::make_pair(NullPtrTy,
NullPtrTy))) {
QualType ParamTypes[2] = { NullPtrTy, NullPtrTy };
@@ -7401,7 +7514,7 @@ public:
}
if (Op == OO_Minus) {
// ptrdiff_t operator-(T, T);
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
continue;
QualType ParamTypes[2] = { *Ptr, *Ptr };
@@ -7530,7 +7643,7 @@ public:
Enum = CandidateTypes[ArgIdx].enumeration_begin(),
EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
Enum != EnumEnd; ++Enum) {
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)))
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)).second)
continue;
AddBuiltinAssignmentOperatorCandidates(S, *Enum, Args, CandidateSet);
@@ -7540,7 +7653,7 @@ public:
MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
MemPtr != MemPtrEnd; ++MemPtr) {
- if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)))
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
continue;
AddBuiltinAssignmentOperatorCandidates(S, *MemPtr, Args, CandidateSet);
@@ -7623,7 +7736,7 @@ public:
PtrEnd = CandidateTypes[1].pointer_end();
Ptr != PtrEnd; ++Ptr) {
// Make sure we don't add the same candidate twice.
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
continue;
QualType ParamTypes[2] = {
@@ -7904,7 +8017,7 @@ public:
Ptr = CandidateTypes[ArgIdx].pointer_begin(),
PtrEnd = CandidateTypes[ArgIdx].pointer_end();
Ptr != PtrEnd; ++Ptr) {
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
continue;
QualType ParamTypes[2] = { *Ptr, *Ptr };
@@ -7915,7 +8028,7 @@ public:
MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
MemPtr != MemPtrEnd; ++MemPtr) {
- if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)))
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
continue;
QualType ParamTypes[2] = { *MemPtr, *MemPtr };
@@ -7930,7 +8043,7 @@ public:
if (!(*Enum)->getAs<EnumType>()->getDecl()->isScoped())
continue;
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)))
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)).second)
continue;
QualType ParamTypes[2] = { *Enum, *Enum };
@@ -8184,12 +8297,10 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
/// isBetterOverloadCandidate - Determines whether the first overload
/// candidate is a better candidate than the second (C++ 13.3.3p1).
-bool
-isBetterOverloadCandidate(Sema &S,
- const OverloadCandidate &Cand1,
- const OverloadCandidate &Cand2,
- SourceLocation Loc,
- bool UserDefinedConversion) {
+bool clang::isBetterOverloadCandidate(Sema &S, const OverloadCandidate &Cand1,
+ const OverloadCandidate &Cand2,
+ SourceLocation Loc,
+ bool UserDefinedConversion) {
// Define viable functions to be better candidates than non-viable
// functions.
if (!Cand2.Viable)
@@ -8521,9 +8632,8 @@ void ImplicitConversionSequence::DiagnoseAmbiguousConversion(
S.Diag(SourceLocation(), diag::note_ovl_too_many_candidates) << int(E - I);
}
-namespace {
-
-void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
+static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
+ unsigned I) {
const ImplicitConversionSequence &Conv = Cand->Conversions[I];
assert(Conv.isBad());
assert(Cand->Function && "for now, candidate must be a function");
@@ -8741,8 +8851,8 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
/// Additional arity mismatch diagnosis specific to a function overload
/// candidates. This is not covered by the more general DiagnoseArityMismatch()
/// over a candidate in any candidate set.
-bool CheckArityMismatch(Sema &S, OverloadCandidate *Cand,
- unsigned NumArgs) {
+static bool CheckArityMismatch(Sema &S, OverloadCandidate *Cand,
+ unsigned NumArgs) {
FunctionDecl *Fn = Cand->Function;
unsigned MinParams = Fn->getMinRequiredArguments();
@@ -8769,7 +8879,7 @@ bool CheckArityMismatch(Sema &S, OverloadCandidate *Cand,
}
/// General arity mismatch diagnosis over a candidate in a candidate set.
-void DiagnoseArityMismatch(Sema &S, Decl *D, unsigned NumFormalArgs) {
+static void DiagnoseArityMismatch(Sema &S, Decl *D, unsigned NumFormalArgs) {
assert(isa<FunctionDecl>(D) &&
"The templated declaration should at least be a function"
" when diagnosing bad template argument deduction due to too many"
@@ -8813,13 +8923,13 @@ void DiagnoseArityMismatch(Sema &S, Decl *D, unsigned NumFormalArgs) {
}
/// Arity mismatch diagnosis specific to a function overload candidate.
-void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
- unsigned NumFormalArgs) {
+static void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
+ unsigned NumFormalArgs) {
if (!CheckArityMismatch(S, Cand, NumFormalArgs))
DiagnoseArityMismatch(S, Cand->Function, NumFormalArgs);
}
-TemplateDecl *getDescribedTemplate(Decl *Templated) {
+static TemplateDecl *getDescribedTemplate(Decl *Templated) {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Templated))
return FD->getDescribedFunctionTemplate();
else if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Templated))
@@ -8830,9 +8940,9 @@ TemplateDecl *getDescribedTemplate(Decl *Templated) {
}
/// Diagnose a failed template-argument deduction.
-void DiagnoseBadDeduction(Sema &S, Decl *Templated,
- DeductionFailureInfo &DeductionFailure,
- unsigned NumArgs) {
+static void DiagnoseBadDeduction(Sema &S, Decl *Templated,
+ DeductionFailureInfo &DeductionFailure,
+ unsigned NumArgs) {
TemplateParameter Param = DeductionFailure.getTemplateParameter();
NamedDecl *ParamD;
(ParamD = Param.dyn_cast<TemplateTypeParmDecl*>()) ||
@@ -9019,7 +9129,8 @@ void DiagnoseBadDeduction(Sema &S, Decl *Templated,
}
/// Diagnose a failed template-argument deduction, for function calls.
-void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand, unsigned NumArgs) {
+static void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
+ unsigned NumArgs) {
unsigned TDK = Cand->DeductionFailure.Result;
if (TDK == Sema::TDK_TooFewArguments || TDK == Sema::TDK_TooManyArguments) {
if (CheckArityMismatch(S, Cand, NumArgs))
@@ -9030,7 +9141,7 @@ void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand, unsigned NumArgs) {
}
/// CUDA: diagnose an invalid call across targets.
-void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
+static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
FunctionDecl *Caller = cast<FunctionDecl>(S.CurContext);
FunctionDecl *Callee = Cand->Function;
@@ -9041,10 +9152,50 @@ void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Callee, FnDesc);
S.Diag(Callee->getLocation(), diag::note_ovl_candidate_bad_target)
- << (unsigned) FnKind << CalleeTarget << CallerTarget;
+ << (unsigned)FnKind << CalleeTarget << CallerTarget;
+
+ // This could be an implicit constructor for which we could not infer the
+  // target due to a collision. Diagnose that case.
+ CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Callee);
+ if (Meth != nullptr && Meth->isImplicit()) {
+ CXXRecordDecl *ParentClass = Meth->getParent();
+ Sema::CXXSpecialMember CSM;
+
+ switch (FnKind) {
+ default:
+ return;
+ case oc_implicit_default_constructor:
+ CSM = Sema::CXXDefaultConstructor;
+ break;
+ case oc_implicit_copy_constructor:
+ CSM = Sema::CXXCopyConstructor;
+ break;
+ case oc_implicit_move_constructor:
+ CSM = Sema::CXXMoveConstructor;
+ break;
+ case oc_implicit_copy_assignment:
+ CSM = Sema::CXXCopyAssignment;
+ break;
+ case oc_implicit_move_assignment:
+ CSM = Sema::CXXMoveAssignment;
+ break;
+ };
+
+ bool ConstRHS = false;
+ if (Meth->getNumParams()) {
+ if (const ReferenceType *RT =
+ Meth->getParamDecl(0)->getType()->getAs<ReferenceType>()) {
+ ConstRHS = RT->getPointeeType().isConstQualified();
+ }
+ }
+
+ S.inferCUDATargetForImplicitSpecialMember(ParentClass, CSM, Meth,
+ /* ConstRHS */ ConstRHS,
+ /* Diagnose */ true);
+ }
}
-void DiagnoseFailedEnableIfAttr(Sema &S, OverloadCandidate *Cand) {
+static void DiagnoseFailedEnableIfAttr(Sema &S, OverloadCandidate *Cand) {
FunctionDecl *Callee = Cand->Function;
EnableIfAttr *Attr = static_cast<EnableIfAttr*>(Cand->DeductionFailure.Data);
@@ -9066,8 +9217,8 @@ void DiagnoseFailedEnableIfAttr(Sema &S, OverloadCandidate *Cand) {
/// It would be great to be able to express per-candidate problems
/// more richly for those diagnostic clients that cared, but we'd
/// still have to be just as careful with the default diagnostics.
-void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
- unsigned NumArgs) {
+static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
+ unsigned NumArgs) {
FunctionDecl *Fn = Cand->Function;
// Note deleted candidates, but only if they're viable.
@@ -9097,6 +9248,13 @@ void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
case ovl_fail_bad_deduction:
return DiagnoseBadDeduction(S, Cand, NumArgs);
+ case ovl_fail_illegal_constructor: {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_illegal_constructor)
+ << (Fn->getPrimaryTemplate() ? 1 : 0);
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
case ovl_fail_trivial_conversion:
case ovl_fail_bad_final_conversion:
case ovl_fail_final_conversion_not_exact:
@@ -9122,7 +9280,7 @@ void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
}
}
-void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
+static void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
// Desugar the type of the surrogate down to a function type,
// retaining as many typedefs as possible while still showing
// the function type (and, therefore, its parameter types).
@@ -9155,10 +9313,9 @@ void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
MaybeEmitInheritedConstructorNote(S, Cand->Surrogate);
}
-void NoteBuiltinOperatorCandidate(Sema &S,
- StringRef Opc,
- SourceLocation OpLoc,
- OverloadCandidate *Cand) {
+static void NoteBuiltinOperatorCandidate(Sema &S, StringRef Opc,
+ SourceLocation OpLoc,
+ OverloadCandidate *Cand) {
assert(Cand->NumConversions <= 2 && "builtin operator is not binary");
std::string TypeStr("operator");
TypeStr += Opc;
@@ -9175,8 +9332,8 @@ void NoteBuiltinOperatorCandidate(Sema &S,
}
}
-void NoteAmbiguousUserConversions(Sema &S, SourceLocation OpLoc,
- OverloadCandidate *Cand) {
+static void NoteAmbiguousUserConversions(Sema &S, SourceLocation OpLoc,
+ OverloadCandidate *Cand) {
unsigned NoOperands = Cand->NumConversions;
for (unsigned ArgIdx = 0; ArgIdx < NoOperands; ++ArgIdx) {
const ImplicitConversionSequence &ICS = Cand->Conversions[ArgIdx];
@@ -9228,6 +9385,7 @@ static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
llvm_unreachable("Unhandled deduction result");
}
+namespace {
struct CompareOverloadCandidatesForDisplay {
Sema &S;
size_t NumArgs;
@@ -9351,11 +9509,12 @@ struct CompareOverloadCandidatesForDisplay {
return S.SourceMgr.isBeforeInTranslationUnit(LLoc, RLoc);
}
};
+}
/// CompleteNonViableCandidate - Normally, overload resolution only
/// computes up to the first. Produces the FixIt set if possible.
-void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
- ArrayRef<Expr *> Args) {
+static void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
+ ArrayRef<Expr *> Args) {
assert(!Cand->Viable);
// Don't do anything on failures other than bad conversion.
@@ -9435,8 +9594,6 @@ void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
}
}
-} // end anonymous namespace
-
/// PrintOverloadCandidates - When overload resolution fails, prints
/// diagnostic messages containing the candidates in the candidate
/// set.
@@ -9513,6 +9670,7 @@ GetLocationForCandidate(const TemplateSpecCandidate *Cand) {
: SourceLocation();
}
+namespace {
struct CompareTemplateSpecCandidatesForDisplay {
Sema &S;
CompareTemplateSpecCandidatesForDisplay(Sema &S) : S(S) {}
@@ -9543,6 +9701,7 @@ struct CompareTemplateSpecCandidatesForDisplay {
return S.SourceMgr.isBeforeInTranslationUnit(LLoc, RLoc);
}
};
+}
/// Diagnose a template argument deduction failure.
/// We are treating these failures as overload failures due to bad
@@ -9631,10 +9790,10 @@ QualType Sema::ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType) {
return Ret;
}
+namespace {
// A helper class to help with address of function resolution
// - allows us to avoid passing around all those ugly parameters
-class AddressOfFunctionResolver
-{
+class AddressOfFunctionResolver {
Sema& S;
Expr* SourceExpr;
const QualType& TargetType;
@@ -9782,12 +9941,12 @@ private:
if (FunctionDecl *FunDecl = dyn_cast<FunctionDecl>(Fn)) {
if (S.getLangOpts().CUDA)
if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext))
- if (S.CheckCUDATarget(Caller, FunDecl))
+ if (!Caller->isImplicit() && S.CheckCUDATarget(Caller, FunDecl))
return false;
// If any candidate has a placeholder return type, trigger its deduction
// now.
- if (S.getLangOpts().CPlusPlus1y &&
+ if (S.getLangOpts().CPlusPlus14 &&
FunDecl->getReturnType()->isUndeducedType() &&
S.DeduceReturnType(FunDecl, SourceExpr->getLocStart(), Complain))
return false;
@@ -9960,7 +10119,8 @@ public:
return &Matches[0].first;
}
};
-
+}
+
/// ResolveAddressOfOverloadedFunction - Try to resolve the address of
/// an overloaded function (C++ [over.over]), where @p From is an
/// expression with overloaded function type and @p ToType is the type
@@ -10092,7 +10252,7 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
if (FoundResult) *FoundResult = I.getPair();
}
- if (Matched && getLangOpts().CPlusPlus1y &&
+ if (Matched && getLangOpts().CPlusPlus14 &&
Matched->getReturnType()->isUndeducedType() &&
DeduceReturnType(Matched, ovl->getExprLoc(), Complain))
return nullptr;
@@ -10422,6 +10582,15 @@ public:
}
+static std::unique_ptr<CorrectionCandidateCallback>
+MakeValidator(Sema &SemaRef, MemberExpr *ME, size_t NumArgs,
+ bool HasTemplateArgs, bool AllowTypoCorrection) {
+ if (!AllowTypoCorrection)
+ return llvm::make_unique<NoTypoCorrectionCCC>();
+ return llvm::make_unique<FunctionCallFilterCCC>(SemaRef, NumArgs,
+ HasTemplateArgs, ME);
+}
+
/// Attempts to recover from a call where no functions were found.
///
/// Returns true if new candidates were found.
@@ -10455,19 +10624,15 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
LookupResult R(SemaRef, ULE->getName(), ULE->getNameLoc(),
Sema::LookupOrdinaryName);
- FunctionCallFilterCCC Validator(SemaRef, Args.size(),
- ExplicitTemplateArgs != nullptr,
- dyn_cast<MemberExpr>(Fn));
- NoTypoCorrectionCCC RejectAll;
- CorrectionCandidateCallback *CCC = AllowTypoCorrection ?
- (CorrectionCandidateCallback*)&Validator :
- (CorrectionCandidateCallback*)&RejectAll;
if (!DiagnoseTwoPhaseLookup(SemaRef, Fn->getExprLoc(), SS, R,
OverloadCandidateSet::CSK_Normal,
ExplicitTemplateArgs, Args) &&
(!EmptyLookup ||
- SemaRef.DiagnoseEmptyLookup(S, SS, R, *CCC,
- ExplicitTemplateArgs, Args)))
+ SemaRef.DiagnoseEmptyLookup(
+ S, SS, R,
+ MakeValidator(SemaRef, dyn_cast<MemberExpr>(Fn), Args.size(),
+ ExplicitTemplateArgs != nullptr, AllowTypoCorrection),
+ ExplicitTemplateArgs, Args)))
return ExprError();
assert(!R.empty() && "lookup results empty despite recovery");
@@ -10945,10 +11110,13 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// Add operator candidates that are member functions.
AddMemberOperatorCandidates(Op, OpLoc, Args, CandidateSet);
- // Add candidates from ADL.
- AddArgumentDependentLookupCandidates(OpName, OpLoc, Args,
- /*ExplicitTemplateArgs*/ nullptr,
- CandidateSet);
+ // Add candidates from ADL. Per [over.match.oper]p2, this lookup is not
+ // performed for an assignment operator (nor for operator[] nor operator->,
+ // which don't get here).
+ if (Opc != BO_Assign)
+ AddArgumentDependentLookupCandidates(OpName, OpLoc, Args,
+ /*ExplicitTemplateArgs*/ nullptr,
+ CandidateSet);
// Add builtin operator candidates.
AddBuiltinOperatorCandidates(Op, OpLoc, Args, CandidateSet);
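
A standalone illustration of the [over.match.oper]p2 rule applied above (names are arbitrary): plain assignment never has namespace-scope candidates, so ADL is skipped for it, while compound assignments still use it.

namespace N {
struct S { int v = 0; };
// operator= must be a member, so ADL could never contribute a usable
// candidate for 'a = b'; compound assignment operators can be free functions.
inline S &operator+=(S &lhs, const S &rhs) { lhs.v += rhs.v; return lhs; }
}

int main() {
  N::S a, b;
  a = b;    // only the implicit member operator= and builtins are considered
  a += b;   // ADL still finds N::operator+=
  return 0;
}
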
@@ -11031,7 +11199,12 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// Cut off the implicit 'this'.
if (isa<CXXMethodDecl>(FnDecl))
ArgsArray = ArgsArray.slice(1);
- checkCall(FnDecl, ArgsArray, 0, isa<CXXMethodDecl>(FnDecl), OpLoc,
+
+ // Check for a self move.
+ if (Op == OO_Equal)
+ DiagnoseSelfMove(Args[0], Args[1], OpLoc);
+
+ checkCall(FnDecl, ArgsArray, 0, isa<CXXMethodDecl>(FnDecl), OpLoc,
TheCall->getSourceRange(), VariadicDoesNotApply);
return MaybeBindToTemporary(TheCall);
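
The DiagnoseSelfMove call added above fires on the overloaded operator= path for patterns like the following (a standalone sketch; the warning is emitted under clang's self-move diagnostic):

#include <string>
#include <utility>

void reset(std::string &s) {
  s = std::move(s);  // self-move: now diagnosed when the overloaded
                     // operator= call is built through CreateOverloadedBinOp
}
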
@@ -11352,6 +11525,10 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
<< (qualsString.find(' ') == std::string::npos ? 1 : 2);
}
+ if (resultType->isMemberPointerType())
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft())
+ RequireCompleteType(LParenLoc, resultType, 0);
+
CXXMemberCallExpr *call
= new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
resultType, valueKind, RParenLoc);
@@ -11505,6 +11682,18 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
ResultType, VK, RParenLoc);
+ // (CUDA B.1): Check for invalid calls between targets.
+ if (getLangOpts().CUDA) {
+ if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext)) {
+ if (CheckCUDATarget(Caller, Method)) {
+ Diag(MemExpr->getMemberLoc(), diag::err_ref_bad_target)
+ << IdentifyCUDATarget(Method) << Method->getIdentifier()
+ << IdentifyCUDATarget(Caller);
+ return ExprError();
+ }
+ }
+ }
+
// Check for a valid return type.
if (CheckCallReturnType(Method->getReturnType(), MemExpr->getMemberLoc(),
TheCall, Method))
@@ -11568,7 +11757,8 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts))
return ExprError();
- assert(Object.get()->getType()->isRecordType() && "Requires object type argument");
+ assert(Object.get()->getType()->isRecordType() &&
+ "Requires object type argument");
const RecordType *Record = Object.get()->getType()->getAs<RecordType>();
// C++ [over.call.object]p1:
@@ -12272,5 +12462,3 @@ ExprResult Sema::FixOverloadedFunctionReference(ExprResult E,
FunctionDecl *Fn) {
return FixOverloadedFunctionReference(E.get(), Found, Fn);
}
-
-} // end namespace clang
diff --git a/lib/Sema/SemaPseudoObject.cpp b/lib/Sema/SemaPseudoObject.cpp
index aa3e89ed67a9..5e92d5d07c51 100644
--- a/lib/Sema/SemaPseudoObject.cpp
+++ b/lib/Sema/SemaPseudoObject.cpp
@@ -406,6 +406,10 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
BinaryOperatorKind opcode,
Expr *LHS, Expr *RHS) {
assert(BinaryOperator::isAssignmentOp(opcode));
+
+ // Recover from user error
+ if (isa<UnresolvedLookupExpr>(RHS))
+ return ExprError();
Expr *syntacticLHS = rebuildAndCaptureObject(LHS);
OpaqueValueExpr *capturedRHS = capture(RHS);
@@ -615,7 +619,7 @@ bool ObjCPropertyOpBuilder::findSetter(bool warn) {
if (setter->isPropertyAccessor() && warn)
if (const ObjCInterfaceDecl *IFace =
dyn_cast<ObjCInterfaceDecl>(setter->getDeclContext())) {
- const StringRef thisPropertyName(prop->getName());
+ StringRef thisPropertyName = prop->getName();
// Try flipping the case of the first character.
char front = thisPropertyName.front();
front = isLowercase(front) ? toUppercase(front) : toLowercase(front);
@@ -1022,7 +1026,8 @@ Sema::ObjCSubscriptKind
// If we don't have a class type in C++, there's no way we can get an
// expression of integral or enumeration type.
const RecordType *RecordTy = T->getAs<RecordType>();
- if (!RecordTy && T->isObjCObjectPointerType())
+ if (!RecordTy &&
+ (T->isObjCObjectPointerType() || T->isVoidPointerType()))
// All other scalar cases are assumed to be dictionary indexing which
// caller handles, with diagnostics if needed.
return OS_Dictionary;
diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp
index 278e6d66828b..22ed93082065 100644
--- a/lib/Sema/SemaStmt.cpp
+++ b/lib/Sema/SemaStmt.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/TypeLoc.h"
@@ -184,6 +185,12 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
const Expr *E = dyn_cast_or_null<Expr>(S);
if (!E)
return;
+
+ // If we are in an unevaluated expression context, then there can be no unused
+ // results because the results aren't expected to be used in the first place.
+ if (isUnevaluatedContext())
+ return;
+
SourceLocation ExprLoc = E->IgnoreParens()->getExprLoc();
// In most cases, we don't want to warn if the expression is written in a
// macro body, or if the macro comes from a system header. If the offending
@@ -364,6 +371,23 @@ Sema::ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
return StmtError();
}
+ ExprResult LHS =
+ CorrectDelayedTyposInExpr(LHSVal, [this](class Expr *E) {
+ if (!getLangOpts().CPlusPlus11)
+ return VerifyIntegerConstantExpression(E);
+ if (Expr *CondExpr =
+ getCurFunction()->SwitchStack.back()->getCond()) {
+ QualType CondType = CondExpr->getType();
+ llvm::APSInt TempVal;
+ return CheckConvertedConstantExpression(E, CondType, TempVal,
+ CCEK_CaseValue);
+ }
+ return ExprError();
+ });
+ if (LHS.isInvalid())
+ return StmtError();
+ LHSVal = LHS.get();
+
if (!getLangOpts().CPlusPlus11) {
// C99 6.8.4.2p3: The expression shall be an integer constant.
// However, GCC allows any evaluatable integer expression.
@@ -381,14 +405,19 @@ Sema::ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
}
}
- LHSVal = ActOnFinishFullExpr(LHSVal, LHSVal->getExprLoc(), false,
- getLangOpts().CPlusPlus11).get();
- if (RHSVal)
- RHSVal = ActOnFinishFullExpr(RHSVal, RHSVal->getExprLoc(), false,
- getLangOpts().CPlusPlus11).get();
+ LHS = ActOnFinishFullExpr(LHSVal, LHSVal->getExprLoc(), false,
+ getLangOpts().CPlusPlus11);
+ if (LHS.isInvalid())
+ return StmtError();
+
+ auto RHS = RHSVal ? ActOnFinishFullExpr(RHSVal, RHSVal->getExprLoc(), false,
+ getLangOpts().CPlusPlus11)
+ : ExprResult();
+ if (RHS.isInvalid())
+ return StmtError();
- CaseStmt *CS = new (Context) CaseStmt(LHSVal, RHSVal, CaseLoc, DotDotDotLoc,
- ColonLoc);
+ CaseStmt *CS = new (Context)
+ CaseStmt(LHS.get(), RHS.get(), CaseLoc, DotDotDotLoc, ColonLoc);
getCurFunction()->SwitchStack.back()->addSwitchCase(CS);
return CS;
}
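
In C++11 mode the case value is now checked as a converted constant expression of the switch condition's type (via the callback above); a short standalone example of what that enforces:

enum class Color : unsigned { Red = 1, Green = 2 };

int classify(Color c) {
  switch (c) {
  case Color::Red:   // OK: already of the condition's type
    return 1;
  // case 2:         // ill-formed: int does not implicitly convert to the
  //                 // scoped enum, so the converted constant check fails
  default:
    return 0;
  }
}
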
@@ -431,7 +460,11 @@ Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
TheDecl->setStmt(LS);
if (!TheDecl->isGnuLocal()) {
TheDecl->setLocStart(IdentLoc);
- TheDecl->setLocation(IdentLoc);
+ if (!TheDecl->isMSAsmLabel()) {
+ // Don't update the location of MS ASM labels. These will result in
+ // a diagnostic, and changing the location here will mess that up.
+ TheDecl->setLocation(IdentLoc);
+ }
}
return LS;
}
@@ -481,47 +514,6 @@ Sema::ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar,
thenStmt, ElseLoc, elseStmt);
}
-/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
-/// the specified width and sign. If an overflow occurs, detect it and emit
-/// the specified diagnostic.
-void Sema::ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &Val,
- unsigned NewWidth, bool NewSign,
- SourceLocation Loc,
- unsigned DiagID) {
- // Perform a conversion to the promoted condition type if needed.
- if (NewWidth > Val.getBitWidth()) {
- // If this is an extension, just do it.
- Val = Val.extend(NewWidth);
- Val.setIsSigned(NewSign);
-
- // If the input was signed and negative and the output is
- // unsigned, don't bother to warn: this is implementation-defined
- // behavior.
- // FIXME: Introduce a second, default-ignored warning for this case?
- } else if (NewWidth < Val.getBitWidth()) {
- // If this is a truncation, check for overflow.
- llvm::APSInt ConvVal(Val);
- ConvVal = ConvVal.trunc(NewWidth);
- ConvVal.setIsSigned(NewSign);
- ConvVal = ConvVal.extend(Val.getBitWidth());
- ConvVal.setIsSigned(Val.isSigned());
- if (ConvVal != Val)
- Diag(Loc, DiagID) << Val.toString(10) << ConvVal.toString(10);
-
- // Regardless of whether a diagnostic was emitted, really do the
- // truncation.
- Val = Val.trunc(NewWidth);
- Val.setIsSigned(NewSign);
- } else if (NewSign != Val.isSigned()) {
- // Convert the sign to match the sign of the condition. This can cause
- // overflow as well: unsigned(INTMIN)
- // We don't diagnose this overflow, because it is implementation-defined
- // behavior.
- // FIXME: Introduce a second, default-ignored warning for this case?
- Val.setIsSigned(NewSign);
- }
-}
-
namespace {
struct CaseCompareFunctor {
bool operator()(const std::pair<llvm::APSInt, CaseStmt*> &LHS,
@@ -671,33 +663,63 @@ Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
}
static void AdjustAPSInt(llvm::APSInt &Val, unsigned BitWidth, bool IsSigned) {
- if (Val.getBitWidth() < BitWidth)
- Val = Val.extend(BitWidth);
- else if (Val.getBitWidth() > BitWidth)
- Val = Val.trunc(BitWidth);
+ Val = Val.extOrTrunc(BitWidth);
Val.setIsSigned(IsSigned);
}
+/// Check the specified case value is in range for the given unpromoted switch
+/// type.
+static void checkCaseValue(Sema &S, SourceLocation Loc, const llvm::APSInt &Val,
+ unsigned UnpromotedWidth, bool UnpromotedSign) {
+ // If the case value was signed and negative and the switch expression is
+ // unsigned, don't bother to warn: this is implementation-defined behavior.
+ // FIXME: Introduce a second, default-ignored warning for this case?
+ if (UnpromotedWidth < Val.getBitWidth()) {
+ llvm::APSInt ConvVal(Val);
+ AdjustAPSInt(ConvVal, UnpromotedWidth, UnpromotedSign);
+ AdjustAPSInt(ConvVal, Val.getBitWidth(), Val.isSigned());
+ // FIXME: Use different diagnostics for overflow in conversion to promoted
+ // type versus "switch expression cannot have this value". Use proper
+ // IntRange checking rather than just looking at the unpromoted type here.
+ if (ConvVal != Val)
+ S.Diag(Loc, diag::warn_case_value_overflow) << Val.toString(10)
+ << ConvVal.toString(10);
+ }
+}
+
+typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl*>, 64> EnumValsTy;
+
/// Returns true if we should emit a diagnostic about this case expression not
/// being a part of the enum used in the switch controlling expression.
-static bool ShouldDiagnoseSwitchCaseNotInEnum(const ASTContext &Ctx,
+static bool ShouldDiagnoseSwitchCaseNotInEnum(const Sema &S,
const EnumDecl *ED,
- const Expr *CaseExpr) {
- // Don't warn if the 'case' expression refers to a static const variable of
- // the enum type.
- CaseExpr = CaseExpr->IgnoreParenImpCasts();
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CaseExpr)) {
+ const Expr *CaseExpr,
+ EnumValsTy::iterator &EI,
+ EnumValsTy::iterator &EIEnd,
+ const llvm::APSInt &Val) {
+ bool FlagType = ED->hasAttr<FlagEnumAttr>();
+
+ if (const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(CaseExpr->IgnoreParenImpCasts())) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
- if (!VD->hasGlobalStorage())
- return true;
QualType VarType = VD->getType();
- if (!VarType.isConstQualified())
- return true;
- QualType EnumType = Ctx.getTypeDeclType(ED);
- if (Ctx.hasSameUnqualifiedType(EnumType, VarType))
+ QualType EnumType = S.Context.getTypeDeclType(ED);
+ if (VD->hasGlobalStorage() && VarType.isConstQualified() &&
+ S.Context.hasSameUnqualifiedType(EnumType, VarType))
return false;
}
}
+
+ if (FlagType) {
+ return !S.IsValueInFlagEnum(ED, Val, false);
+ } else {
+ while (EI != EIEnd && EI->first < Val)
+ EI++;
+
+ if (EI != EIEnd && EI->first == Val)
+ return false;
+ }
+
return true;
}
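
Illustrative sketch (not part of the upstream change) of the behaviour the checkCaseValue()/AdjustAPSInt() split above is aiming for, assuming a target with a signed 8-bit char:

// Case values are now compared at the promoted width (int), so 256 and 0 are
// no longer reported as duplicate cases; 256 is still flagged against the
// unpromoted 'char' type via warn_case_value_overflow.
int classify(int c) {
  switch ((char)c) {
  case 256:    // warning: overflow converting case value
    return 1;
  case 0:      // previously this pair was (wrongly) rejected as a duplicate
    return 0;
  default:
    return 2;
  }
}
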
@@ -708,9 +730,10 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
assert(SS == getCurFunction()->SwitchStack.back() &&
"switch stack missing push/pop!");
+ getCurFunction()->SwitchStack.pop_back();
+
if (!BodyStmt) return StmtError();
SS->setBody(BodyStmt, SwitchLoc);
- getCurFunction()->SwitchStack.pop_back();
Expr *CondExpr = SS->getCond();
if (!CondExpr) return StmtError();
@@ -744,13 +767,20 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
}
}
- // Get the bitwidth of the switched-on value before promotions. We must
+ // Get the bitwidth of the switched-on value after promotions. We must
// convert the integer case values to this width before comparison.
bool HasDependentValue
= CondExpr->isTypeDependent() || CondExpr->isValueDependent();
- unsigned CondWidth
+ unsigned CondWidth = HasDependentValue ? 0 : Context.getIntWidth(CondType);
+ bool CondIsSigned = CondType->isSignedIntegerOrEnumerationType();
+
+ // Get the width and signedness that the condition might actually have, for
+ // warning purposes.
+ // FIXME: Grab an IntRange for the condition rather than using the unpromoted
+ // type.
+ unsigned CondWidthBeforePromotion
= HasDependentValue ? 0 : Context.getIntWidth(CondTypeBeforePromotion);
- bool CondIsSigned
+ bool CondIsSignedBeforePromotion
= CondTypeBeforePromotion->isSignedIntegerOrEnumerationType();
// Accumulate all of the case values in a vector so that we can sort them
@@ -816,15 +846,13 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
Lo = ImpCastExprToType(Lo, CondType, CK_IntegralCast).get();
}
- // Convert the value to the same width/sign as the condition had prior to
- // integral promotions.
- //
- // FIXME: This causes us to reject valid code:
- // switch ((char)c) { case 256: case 0: return 0; }
- // Here we claim there is a duplicated condition value, but there is not.
- ConvertIntegerToTypeWarnOnOverflow(LoVal, CondWidth, CondIsSigned,
- Lo->getLocStart(),
- diag::warn_case_value_overflow);
+ // Check the unconverted value is within the range of possible values of
+ // the switch expression.
+ checkCaseValue(*this, Lo->getLocStart(), LoVal,
+ CondWidthBeforePromotion, CondIsSignedBeforePromotion);
+
+ // Convert the value to the same width/sign as the condition.
+ AdjustAPSInt(LoVal, CondWidth, CondIsSigned);
CS->setLHS(Lo);
@@ -847,9 +875,8 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
llvm::APSInt ConstantCondValue;
bool HasConstantCond = false;
if (!HasDependentValue && !TheDefaultStmt) {
- HasConstantCond
- = CondExprBeforePromotion->EvaluateAsInt(ConstantCondValue, Context,
- Expr::SE_AllowSideEffects);
+ HasConstantCond = CondExpr->EvaluateAsInt(ConstantCondValue, Context,
+ Expr::SE_AllowSideEffects);
assert(!HasConstantCond ||
(ConstantCondValue.getBitWidth() == CondWidth &&
ConstantCondValue.isSigned() == CondIsSigned));
@@ -935,10 +962,13 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
Hi = ImpCastExprToType(Hi, CondType, CK_IntegralCast).get();
}
+ // Check the unconverted value is within the range of possible values of
+ // the switch expression.
+ checkCaseValue(*this, Hi->getLocStart(), HiVal,
+ CondWidthBeforePromotion, CondIsSignedBeforePromotion);
+
// Convert the value to the same width/sign as the condition.
- ConvertIntegerToTypeWarnOnOverflow(HiVal, CondWidth, CondIsSigned,
- Hi->getLocStart(),
- diag::warn_case_value_overflow);
+ AdjustAPSInt(HiVal, CondWidth, CondIsSigned);
CR->setRHS(Hi);
@@ -1029,8 +1059,6 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// If switch has default case, then ignore it.
if (!CaseListIsErroneous && !HasConstantCond && ET) {
const EnumDecl *ED = ET->getDecl();
- typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl*>, 64>
- EnumValsTy;
EnumValsTy EnumVals;
// Gather all enum values, set their type and sort them,
@@ -1041,57 +1069,48 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
EnumVals.push_back(std::make_pair(Val, EDI));
}
std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
- EnumValsTy::iterator EIend =
+ auto EI = EnumVals.begin(), EIEnd =
std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);
// See which case values aren't in enum.
- EnumValsTy::const_iterator EI = EnumVals.begin();
for (CaseValsTy::const_iterator CI = CaseVals.begin();
- CI != CaseVals.end(); CI++) {
- while (EI != EIend && EI->first < CI->first)
- EI++;
- if (EI == EIend || EI->first > CI->first) {
- Expr *CaseExpr = CI->second->getLHS();
- if (ShouldDiagnoseSwitchCaseNotInEnum(Context, ED, CaseExpr))
- Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
- << CondTypeBeforePromotion;
- }
+ CI != CaseVals.end(); CI++) {
+ Expr *CaseExpr = CI->second->getLHS();
+ if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
+ CI->first))
+ Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
}
+
// See which of case ranges aren't in enum
EI = EnumVals.begin();
for (CaseRangesTy::const_iterator RI = CaseRanges.begin();
- RI != CaseRanges.end() && EI != EIend; RI++) {
- while (EI != EIend && EI->first < RI->first)
- EI++;
-
- if (EI == EIend || EI->first != RI->first) {
- Expr *CaseExpr = RI->second->getLHS();
- if (ShouldDiagnoseSwitchCaseNotInEnum(Context, ED, CaseExpr))
- Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
- << CondTypeBeforePromotion;
- }
+ RI != CaseRanges.end(); RI++) {
+ Expr *CaseExpr = RI->second->getLHS();
+ if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
+ RI->first))
+ Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
llvm::APSInt Hi =
RI->second->getRHS()->EvaluateKnownConstInt(Context);
AdjustAPSInt(Hi, CondWidth, CondIsSigned);
- while (EI != EIend && EI->first < Hi)
- EI++;
- if (EI == EIend || EI->first != Hi) {
- Expr *CaseExpr = RI->second->getRHS();
- if (ShouldDiagnoseSwitchCaseNotInEnum(Context, ED, CaseExpr))
- Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
- << CondTypeBeforePromotion;
- }
+
+ CaseExpr = RI->second->getRHS();
+ if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
+ Hi))
+ Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
}
// Check which enum vals aren't in switch
- CaseValsTy::const_iterator CI = CaseVals.begin();
- CaseRangesTy::const_iterator RI = CaseRanges.begin();
+ auto CI = CaseVals.begin();
+ auto RI = CaseRanges.begin();
bool hasCasesNotInSwitch = false;
SmallVector<DeclarationName,8> UnhandledNames;
- for (EI = EnumVals.begin(); EI != EIend; EI++){
+ for (EI = EnumVals.begin(); EI != EIEnd; EI++){
// Drop unneeded case values
while (CI != CaseVals.end() && CI->first < EI->first)
CI++;
@@ -1178,30 +1197,37 @@ Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
llvm::APSInt RhsVal = SrcExpr->EvaluateKnownConstInt(Context);
AdjustAPSInt(RhsVal, DstWidth, DstIsSigned);
const EnumDecl *ED = ET->getDecl();
- typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl *>, 64>
- EnumValsTy;
- EnumValsTy EnumVals;
-
- // Gather all enum values, set their type and sort them,
- // allowing easier comparison with rhs constant.
- for (auto *EDI : ED->enumerators()) {
- llvm::APSInt Val = EDI->getInitVal();
- AdjustAPSInt(Val, DstWidth, DstIsSigned);
- EnumVals.push_back(std::make_pair(Val, EDI));
- }
- if (EnumVals.empty())
- return;
- std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
- EnumValsTy::iterator EIend =
- std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);
-
- // See which values aren't in the enum.
- EnumValsTy::const_iterator EI = EnumVals.begin();
- while (EI != EIend && EI->first < RhsVal)
- EI++;
- if (EI == EIend || EI->first != RhsVal) {
- Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment)
+
+ if (ED->hasAttr<FlagEnumAttr>()) {
+ if (!IsValueInFlagEnum(ED, RhsVal, true))
+ Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment)
<< DstType.getUnqualifiedType();
+ } else {
+ typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl *>, 64>
+ EnumValsTy;
+ EnumValsTy EnumVals;
+
+ // Gather all enum values, set their type and sort them,
+ // allowing easier comparison with rhs constant.
+ for (auto *EDI : ED->enumerators()) {
+ llvm::APSInt Val = EDI->getInitVal();
+ AdjustAPSInt(Val, DstWidth, DstIsSigned);
+ EnumVals.push_back(std::make_pair(Val, EDI));
+ }
+ if (EnumVals.empty())
+ return;
+ std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
+ EnumValsTy::iterator EIend =
+ std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);
+
+ // See which values aren't in the enum.
+ EnumValsTy::const_iterator EI = EnumVals.begin();
+ while (EI != EIend && EI->first < RhsVal)
+ EI++;
+ if (EI == EIend || EI->first != RhsVal) {
+ Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment)
+ << DstType.getUnqualifiedType();
+ }
}
}
}
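
Illustrative sketch (not part of the upstream change) of the flag-enum handling that the two hunks above route through IsValueInFlagEnum(); the GNU attribute spelling below is assumed to be the flag_enum attribute this release introduces:

// With flag_enum, a case value composed of ORed enumerators counts as being
// "in" the enum, so -Wswitch's warn_not_in_enum is not expected here; the
// same IsValueInFlagEnum() check now backs DiagnoseAssignmentEnum() as well.
enum __attribute__((flag_enum)) Access { Read = 1, Write = 2, Exec = 4 };

int describe(Access a) {
  switch (a) {
  case Read:
  case Write:
  case Exec:
    return 1;
  case (Access)(Read | Write):  // combination of flag bits: no warning expected
    return 2;
  default:
    return 0;
  }
}
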
@@ -1260,13 +1286,13 @@ namespace {
// the evaluated decls into a vector. Simple is set to true if none
// of the excluded constructs are used.
class DeclExtractor : public EvaluatedExprVisitor<DeclExtractor> {
- llvm::SmallPtrSet<VarDecl*, 8> &Decls;
+ llvm::SmallPtrSetImpl<VarDecl*> &Decls;
SmallVectorImpl<SourceRange> &Ranges;
bool Simple;
public:
typedef EvaluatedExprVisitor<DeclExtractor> Inherited;
- DeclExtractor(Sema &S, llvm::SmallPtrSet<VarDecl*, 8> &Decls,
+ DeclExtractor(Sema &S, llvm::SmallPtrSetImpl<VarDecl*> &Decls,
SmallVectorImpl<SourceRange> &Ranges) :
Inherited(S.Context),
Decls(Decls),
@@ -1338,13 +1364,13 @@ namespace {
// DeclMatcher checks to see if the decls are used in a non-evaluated
// context.
class DeclMatcher : public EvaluatedExprVisitor<DeclMatcher> {
- llvm::SmallPtrSet<VarDecl*, 8> &Decls;
+ llvm::SmallPtrSetImpl<VarDecl*> &Decls;
bool FoundDecl;
public:
typedef EvaluatedExprVisitor<DeclMatcher> Inherited;
- DeclMatcher(Sema &S, llvm::SmallPtrSet<VarDecl*, 8> &Decls,
+ DeclMatcher(Sema &S, llvm::SmallPtrSetImpl<VarDecl*> &Decls,
Stmt *Statement) :
Inherited(S.Context), Decls(Decls), FoundDecl(false) {
if (!Statement) return;
@@ -1427,8 +1453,8 @@ namespace {
if (Decls.size() == 0) return;
// Don't warn on volatile, static, or global variables.
- for (llvm::SmallPtrSet<VarDecl*, 8>::iterator I = Decls.begin(),
- E = Decls.end();
+ for (llvm::SmallPtrSetImpl<VarDecl*>::iterator I = Decls.begin(),
+ E = Decls.end();
I != E; ++I)
if ((*I)->getType().isVolatileQualified() ||
(*I)->hasGlobalStorage()) return;
@@ -1443,8 +1469,8 @@ namespace {
PDiag << 0;
else {
PDiag << Decls.size();
- for (llvm::SmallPtrSet<VarDecl*, 8>::iterator I = Decls.begin(),
- E = Decls.end();
+ for (llvm::SmallPtrSetImpl<VarDecl*>::iterator I = Decls.begin(),
+ E = Decls.end();
I != E; ++I)
PDiag << (*I)->getDeclName();
}
@@ -1660,11 +1686,16 @@ Sema::CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
if (!collection)
return ExprError();
+ ExprResult result = CorrectDelayedTyposInExpr(collection);
+ if (!result.isUsable())
+ return ExprError();
+ collection = result.get();
+
// Bail out early if we've got a type-dependent expression.
if (collection->isTypeDependent()) return collection;
// Perform normal l-value conversion.
- ExprResult result = DefaultFunctionArrayLvalueConversion(collection);
+ result = DefaultFunctionArrayLvalueConversion(collection);
if (result.isInvalid())
return ExprError();
collection = result.get();
@@ -2453,7 +2484,7 @@ VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType,
// - in a return statement in a function [where] ...
// ... the expression is the name of a non-volatile automatic object ...
DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParens());
- if (!DR || DR->refersToEnclosingLocal())
+ if (!DR || DR->refersToEnclosingVariableOrCapture())
return nullptr;
VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
if (!VD)
@@ -2621,8 +2652,12 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
return StmtError();
RetValExp = Result.get();
+ // DR1048: even prior to C++14, we should use the 'auto' deduction rules
+ // when deducing a return type for a lambda-expression (or by extension
+ // for a block). These rules differ from the stated C++11 rules only in
+ // that they remove top-level cv-qualifiers.
if (!CurContext->isDependentContext())
- FnRetType = RetValExp->getType();
+ FnRetType = RetValExp->getType().getUnqualifiedType();
else
FnRetType = CurCap->ReturnType = Context.DependentTy;
} else {
@@ -2727,14 +2762,54 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
return Result;
}
+namespace {
+/// \brief Marks all typedefs in all local classes in a type referenced.
+///
+/// In a function like
+/// auto f() {
+/// struct S { typedef int a; };
+/// return S();
+/// }
+///
+/// the local type escapes and could be referenced in some TUs but not in
+/// others. Pretend that all local typedefs are always referenced, to not warn
+/// on this. This isn't necessary if f has internal linkage, or the typedef
+/// is private.
+class LocalTypedefNameReferencer
+ : public RecursiveASTVisitor<LocalTypedefNameReferencer> {
+public:
+ LocalTypedefNameReferencer(Sema &S) : S(S) {}
+ bool VisitRecordType(const RecordType *RT);
+private:
+ Sema &S;
+};
+bool LocalTypedefNameReferencer::VisitRecordType(const RecordType *RT) {
+ auto *R = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!R || !R->isLocalClass() || !R->isLocalClass()->isExternallyVisible() ||
+ R->isDependentType())
+ return true;
+ for (auto *TmpD : R->decls())
+ if (auto *T = dyn_cast<TypedefNameDecl>(TmpD))
+ if (T->getAccess() != AS_private || R->hasFriends())
+ S.MarkAnyDeclReferenced(T->getLocation(), T, /*OdrUse=*/false);
+ return true;
+}
+}
+
+TypeLoc Sema::getReturnTypeLoc(FunctionDecl *FD) const {
+ TypeLoc TL = FD->getTypeSourceInfo()->getTypeLoc().IgnoreParens();
+ while (auto ATL = TL.getAs<AttributedTypeLoc>())
+ TL = ATL.getModifiedLoc().IgnoreParens();
+ return TL.castAs<FunctionProtoTypeLoc>().getReturnLoc();
+}
+
/// Deduce the return type for a function from a returned expression, per
/// C++1y [dcl.spec.auto]p6.
bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr,
AutoType *AT) {
- TypeLoc OrigResultType = FD->getTypeSourceInfo()->getTypeLoc().
- IgnoreParens().castAs<FunctionProtoTypeLoc>().getReturnLoc();
+ TypeLoc OrigResultType = getReturnTypeLoc(FD);
QualType Deduced;
if (RetExpr && isa<InitListExpr>(RetExpr)) {
@@ -2772,6 +2847,11 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
if (DAR != DAR_Succeeded)
return true;
+
+ // If a local type is part of the returned type, mark its fields as
+ // referenced.
+ LocalTypedefNameReferencer Referencer(*this);
+ Referencer.TraverseType(RetExpr->getType());
} else {
// In the case of a return with no operand, the initializer is considered
// to be void().
@@ -2872,7 +2952,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// FIXME: Add a flag to the ScopeInfo to indicate whether we're performing
// deduction.
- if (getLangOpts().CPlusPlus1y) {
+ if (getLangOpts().CPlusPlus14) {
if (AutoType *AT = FnRetType->getContainedAutoType()) {
FunctionDecl *FD = cast<FunctionDecl>(CurContext);
if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
@@ -2964,14 +3044,26 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, nullptr);
} else if (!RetValExp && !HasDependentReturnType) {
- unsigned DiagID = diag::warn_return_missing_expr; // C90 6.6.6.4p4
- // C99 6.8.6.4p1 (ext_ since GCC warns)
- if (getLangOpts().C99) DiagID = diag::ext_return_missing_expr;
+ FunctionDecl *FD = getCurFunctionDecl();
- if (FunctionDecl *FD = getCurFunctionDecl())
+ unsigned DiagID;
+ if (getLangOpts().CPlusPlus11 && FD && FD->isConstexpr()) {
+ // C++11 [stmt.return]p2
+ DiagID = diag::err_constexpr_return_missing_expr;
+ FD->setInvalidDecl();
+ } else if (getLangOpts().C99) {
+ // C99 6.8.6.4p1 (ext_ since GCC warns)
+ DiagID = diag::ext_return_missing_expr;
+ } else {
+ // C90 6.6.6.4p4
+ DiagID = diag::warn_return_missing_expr;
+ }
+
+ if (FD)
Diag(ReturnLoc, DiagID) << FD->getIdentifier() << 0/*fn*/;
else
Diag(ReturnLoc, DiagID) << getCurMethodDecl()->getDeclName() << 1/*meth*/;
+
Result = new (Context) ReturnStmt(ReturnLoc);
} else {
assert(RetValExp || HasDependentReturnType);
@@ -3119,9 +3211,24 @@ Sema::ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand) {
if (!type->isDependentType() &&
!type->isObjCObjectPointerType()) {
const PointerType *pointerType = type->getAs<PointerType>();
- if (!pointerType || !pointerType->getPointeeType()->isVoidType())
- return Diag(atLoc, diag::error_objc_synchronized_expects_object)
- << type << operand->getSourceRange();
+ if (!pointerType || !pointerType->getPointeeType()->isVoidType()) {
+ if (getLangOpts().CPlusPlus) {
+ if (RequireCompleteType(atLoc, type,
+ diag::err_incomplete_receiver_type))
+ return Diag(atLoc, diag::error_objc_synchronized_expects_object)
+ << type << operand->getSourceRange();
+
+ ExprResult result = PerformContextuallyConvertToObjCPointer(operand);
+ if (!result.isUsable())
+ return Diag(atLoc, diag::error_objc_synchronized_expects_object)
+ << type << operand->getSourceRange();
+
+ operand = result.get();
+ } else {
+ return Diag(atLoc, diag::error_objc_synchronized_expects_object)
+ << type << operand->getSourceRange();
+ }
+ }
}
// The operand to @synchronized is a full-expression.
@@ -3249,15 +3356,16 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
return CXXTryStmt::Create(Context, TryLoc, TryBlock, Handlers);
}
-StmtResult Sema::ActOnSEHTryBlock(bool IsCXXTry, SourceLocation TryLoc,
- Stmt *TryBlock, Stmt *Handler,
- int HandlerIndex, int HandlerParentIndex) {
+StmtResult
+Sema::ActOnSEHTryBlock(bool IsCXXTry,
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler) {
assert(TryBlock && Handler);
getCurFunction()->setHasBranchProtectedScope();
- return SEHTryStmt::Create(Context, IsCXXTry, TryLoc, TryBlock, Handler,
- HandlerIndex, HandlerParentIndex);
+ return SEHTryStmt::Create(Context,IsCXXTry,TryLoc,TryBlock,Handler);
}
StmtResult
@@ -3330,6 +3438,7 @@ Sema::CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc,
else
RD = RecordDecl::Create(Context, TTK_Struct, DC, Loc, Loc, /*Id=*/nullptr);
+ RD->setCapturedRecord();
DC->addDecl(RD);
RD->setImplicit();
RD->startDefinition();
@@ -3353,6 +3462,11 @@ static void buildCapturedStmtCaptureList(
CapturedStmt::VCK_This));
CaptureInits.push_back(Cap->getInitExpr());
continue;
+ } else if (Cap->isVLATypeCapture()) {
+ Captures.push_back(
+ CapturedStmt::Capture(Cap->getLocation(), CapturedStmt::VCK_VLAType));
+ CaptureInits.push_back(nullptr);
+ continue;
}
assert(Cap->isReferenceCapture() &&
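
Illustrative sketch (not part of the upstream change) of the DR1048 rule adopted in the ActOnCapScopeReturnStmt hunk above: deduced lambda (and block) return types drop top-level cv-qualifiers.

#include <string>
#include <type_traits>

// The lambda's deduced return type is 'std::string', not 'const std::string';
// top-level cv-qualifiers are stripped, matching 'auto' deduction rather than
// a literal reading of the C++11 wording.
void check() {
  const std::string s = "hi";
  auto f = [&s] { return s; };
  static_assert(std::is_same<decltype(f()), std::string>::value,
                "top-level const is dropped from the deduced return type");
}
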
diff --git a/lib/Sema/SemaStmtAsm.cpp b/lib/Sema/SemaStmtAsm.cpp
index 5d076cac940f..286c7619ed40 100644
--- a/lib/Sema/SemaStmtAsm.cpp
+++ b/lib/Sema/SemaStmtAsm.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
@@ -74,6 +75,32 @@ static bool isOperandMentioned(unsigned OpNo,
return false;
}
+static bool CheckNakedParmReference(Expr *E, Sema &S) {
+ FunctionDecl *Func = dyn_cast<FunctionDecl>(S.CurContext);
+ if (!Func)
+ return false;
+ if (!Func->hasAttr<NakedAttr>())
+ return false;
+
+ SmallVector<Expr*, 4> WorkList;
+ WorkList.push_back(E);
+ while (WorkList.size()) {
+ Expr *E = WorkList.pop_back_val();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (isa<ParmVarDecl>(DRE->getDecl())) {
+ S.Diag(DRE->getLocStart(), diag::err_asm_naked_parm_ref);
+ S.Diag(Func->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
+ return true;
+ }
+ }
+ for (Stmt *Child : E->children()) {
+ if (Expr *E = dyn_cast_or_null<Expr>(Child))
+ WorkList.push_back(E);
+ }
+ }
+ return false;
+}
+
StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
@@ -89,15 +116,11 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
// The parser verifies that there is a string literal here.
- if (!AsmString->isAscii())
- return StmtError(Diag(AsmString->getLocStart(),diag::err_asm_wide_character)
- << AsmString->getSourceRange());
+ assert(AsmString->isAscii());
for (unsigned i = 0; i != NumOutputs; i++) {
StringLiteral *Literal = Constraints[i];
- if (!Literal->isAscii())
- return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
- << Literal->getSourceRange());
+ assert(Literal->isAscii());
StringRef OutputName;
if (Names[i])
@@ -109,27 +132,69 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
diag::err_asm_invalid_output_constraint)
<< Info.getConstraintStr());
+ ExprResult ER = CheckPlaceholderExpr(Exprs[i]);
+ if (ER.isInvalid())
+ return StmtError();
+ Exprs[i] = ER.get();
+
// Check that the output exprs are valid lvalues.
Expr *OutputExpr = Exprs[i];
- if (CheckAsmLValue(OutputExpr, *this))
- return StmtError(Diag(OutputExpr->getLocStart(),
- diag::err_asm_invalid_lvalue_in_output)
- << OutputExpr->getSourceRange());
- if (RequireCompleteType(OutputExpr->getLocStart(), Exprs[i]->getType(),
- diag::err_dereference_incomplete_type))
+ // Referring to parameters is not allowed in naked functions.
+ if (CheckNakedParmReference(OutputExpr, *this))
return StmtError();
OutputConstraintInfos.push_back(Info);
+
+ // If this is dependent, just continue.
+ if (OutputExpr->isTypeDependent())
+ continue;
+
+ Expr::isModifiableLvalueResult IsLV =
+ OutputExpr->isModifiableLvalue(Context, /*Loc=*/nullptr);
+ switch (IsLV) {
+ case Expr::MLV_Valid:
+ // Cool, this is an lvalue.
+ break;
+ case Expr::MLV_ArrayType:
+ // This is OK too.
+ break;
+ case Expr::MLV_LValueCast: {
+ const Expr *LVal = OutputExpr->IgnoreParenNoopCasts(Context);
+ if (!getLangOpts().HeinousExtensions) {
+ Diag(LVal->getLocStart(), diag::err_invalid_asm_cast_lvalue)
+ << OutputExpr->getSourceRange();
+ } else {
+ Diag(LVal->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
+ << OutputExpr->getSourceRange();
+ }
+ // Accept, even if we emitted an error diagnostic.
+ break;
+ }
+ case Expr::MLV_IncompleteType:
+ case Expr::MLV_IncompleteVoidType:
+ if (RequireCompleteType(OutputExpr->getLocStart(), Exprs[i]->getType(),
+ diag::err_dereference_incomplete_type))
+ return StmtError();
+ default:
+ return StmtError(Diag(OutputExpr->getLocStart(),
+ diag::err_asm_invalid_lvalue_in_output)
+ << OutputExpr->getSourceRange());
+ }
+
+ unsigned Size = Context.getTypeSize(OutputExpr->getType());
+ if (!Context.getTargetInfo().validateOutputSize(Literal->getString(),
+ Size))
+ return StmtError(Diag(OutputExpr->getLocStart(),
+ diag::err_asm_invalid_output_size)
+ << Info.getConstraintStr());
}
SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) {
StringLiteral *Literal = Constraints[i];
- if (!Literal->isAscii())
- return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
- << Literal->getSourceRange());
+ assert(Literal->isAscii());
StringRef InputName;
if (Names[i])
@@ -143,8 +208,17 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
<< Info.getConstraintStr());
}
+ ExprResult ER = CheckPlaceholderExpr(Exprs[i]);
+ if (ER.isInvalid())
+ return StmtError();
+ Exprs[i] = ER.get();
+
Expr *InputExpr = Exprs[i];
+ // Referring to parameters is not allowed in naked functions.
+ if (CheckNakedParmReference(InputExpr, *this))
+ return StmtError();
+
// Only allow void types for memory constraints.
if (Info.allowsMemory() && !Info.allowsRegister()) {
if (CheckAsmLValue(InputExpr, *this))
@@ -152,6 +226,20 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
diag::err_asm_invalid_lvalue_in_input)
<< Info.getConstraintStr()
<< InputExpr->getSourceRange());
+ } else if (Info.requiresImmediateConstant() && !Info.allowsRegister()) {
+ llvm::APSInt Result;
+ if (!InputExpr->EvaluateAsInt(Result, Context))
+ return StmtError(
+ Diag(InputExpr->getLocStart(), diag::err_asm_invalid_type_in_input)
+ << InputExpr->getType() << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ if (Result.slt(Info.getImmConstantMin()) ||
+ Result.sgt(Info.getImmConstantMax()))
+ return StmtError(Diag(InputExpr->getLocStart(),
+ diag::err_invalid_asm_value_for_constraint)
+ << Result.toString(10) << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+
} else {
ExprResult Result = DefaultFunctionArrayLvalueConversion(Exprs[i]);
if (Result.isInvalid())
@@ -191,9 +279,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// Check that the clobbers are valid.
for (unsigned i = 0; i != NumClobbers; i++) {
StringLiteral *Literal = Clobbers[i];
- if (!Literal->isAscii())
- return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
- << Literal->getSourceRange());
+ assert(Literal->isAscii());
StringRef Clobber = Literal->getString();
@@ -257,16 +343,47 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
continue;
unsigned Size = Context.getTypeSize(Ty);
- if (!Context.getTargetInfo()
- .validateConstraintModifier(Literal->getString(), Piece.getModifier(),
- Size))
+ std::string SuggestedModifier;
+ if (!Context.getTargetInfo().validateConstraintModifier(
+ Literal->getString(), Piece.getModifier(), Size,
+ SuggestedModifier)) {
Diag(Exprs[ConstraintIdx]->getLocStart(),
diag::warn_asm_mismatched_size_modifier);
+
+ if (!SuggestedModifier.empty()) {
+ auto B = Diag(Piece.getRange().getBegin(),
+ diag::note_asm_missing_constraint_modifier)
+ << SuggestedModifier;
+ SuggestedModifier = "%" + SuggestedModifier + Piece.getString();
+ B.AddFixItHint(FixItHint::CreateReplacement(Piece.getRange(),
+ SuggestedModifier));
+ }
+ }
}
// Validate tied input operands for type mismatches.
+ unsigned NumAlternatives = ~0U;
+ for (unsigned i = 0, e = OutputConstraintInfos.size(); i != e; ++i) {
+ TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+ StringRef ConstraintStr = Info.getConstraintStr();
+ unsigned AltCount = ConstraintStr.count(',') + 1;
+ if (NumAlternatives == ~0U)
+ NumAlternatives = AltCount;
+ else if (NumAlternatives != AltCount)
+ return StmtError(Diag(NS->getOutputExpr(i)->getLocStart(),
+ diag::err_asm_unexpected_constraint_alternatives)
+ << NumAlternatives << AltCount);
+ }
for (unsigned i = 0, e = InputConstraintInfos.size(); i != e; ++i) {
TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+ StringRef ConstraintStr = Info.getConstraintStr();
+ unsigned AltCount = ConstraintStr.count(',') + 1;
+ if (NumAlternatives == ~0U)
+ NumAlternatives = AltCount;
+ else if (NumAlternatives != AltCount)
+ return StmtError(Diag(NS->getInputExpr(i)->getLocStart(),
+ diag::err_asm_unexpected_constraint_alternatives)
+ << NumAlternatives << AltCount);
// If this is a tied constraint, verify that the output and input have
// either exactly the same type, or that they are int/ptr operands with the
@@ -394,6 +511,10 @@ ExprResult Sema::LookupInlineAsmIdentifier(CXXScopeSpec &SS,
Result = CheckPlaceholderExpr(Result.get());
if (!Result.isUsable()) return Result;
+ // Referring to parameters is not allowed in naked functions.
+ if (CheckNakedParmReference(Result.get(), *this))
+ return ExprError();
+
QualType T = Result.get()->getType();
// For now, reject dependent types.
@@ -443,9 +564,10 @@ bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
NamedDecl *FoundDecl = BaseResult.getFoundDecl();
if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl))
RT = VD->getType()->getAs<RecordType>();
- else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl))
+ else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl)) {
+ MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
RT = TD->getUnderlyingType()->getAs<RecordType>();
- else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
+ } else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
RT = TD->getTypeForDecl()->getAs<RecordType>();
if (!RT)
return true;
@@ -481,6 +603,7 @@ StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc) {
bool IsSimple = (NumOutputs != 0 || NumInputs != 0);
+ getCurFunction()->setHasBranchProtectedScope();
MSAsmStmt *NS =
new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple,
/*IsVolatile*/ true, AsmToks, NumOutputs, NumInputs,
@@ -488,3 +611,34 @@ StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
Clobbers, EndLoc);
return NS;
}
+
+LabelDecl *Sema::GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
+ SourceLocation Location,
+ bool AlwaysCreate) {
+ LabelDecl* Label = LookupOrCreateLabel(PP.getIdentifierInfo(ExternalLabelName),
+ Location);
+
+ if (Label->isMSAsmLabel()) {
+ // If we have previously created this label implicitly, mark it as used.
+ Label->markUsed(Context);
+ } else {
+ // Otherwise, insert it, but only resolve it if we have seen the label itself.
+ std::string InternalName;
+ llvm::raw_string_ostream OS(InternalName);
+ // Create an internal name for the label. The name should not be a valid mangled
+ // name, and should be unique. We use a dot to make the name an invalid mangled
+ // name.
+ OS << "__MSASMLABEL_." << MSAsmLabelNameCounter++ << "__" << ExternalLabelName;
+ Label->setMSAsmLabel(OS.str());
+ }
+ if (AlwaysCreate) {
+ // The label might have been created implicitly from a previously encountered
+ // goto statement. So, for both newly created and looked up labels, we mark
+ // them as resolved.
+ Label->setMSAsmLabelResolved();
+ }
+  // Adjust the label's location so that we can generate accurate diagnostics.
+ Label->setLocation(Location);
+
+ return Label;
+}
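
Illustrative sketch (not part of the upstream change) of the new CheckNakedParmReference() rejection; availability of the naked attribute itself is target-dependent.

// A naked function has no prologue, so a parameter has no guaranteed home.
// Any DeclRefExpr to a ParmVarDecl among the asm operands (or resolved via
// MS-style asm identifier lookup) is now diagnosed with err_asm_naked_parm_ref.
__attribute__((naked)) void touch(int x) {
  __asm__ volatile("" : : "r"(x));  // error: referencing the parameter is rejected
}
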
diff --git a/lib/Sema/SemaStmtAttr.cpp b/lib/Sema/SemaStmtAttr.cpp
index a32e0fbcb622..19e2c8ed652f 100644
--- a/lib/Sema/SemaStmtAttr.cpp
+++ b/lib/Sema/SemaStmtAttr.cpp
@@ -47,30 +47,36 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const AttributeList &A,
SourceRange) {
IdentifierLoc *PragmaNameLoc = A.getArgAsIdent(0);
IdentifierLoc *OptionLoc = A.getArgAsIdent(1);
- IdentifierInfo *OptionInfo = OptionLoc->Ident;
- IdentifierLoc *ValueLoc = A.getArgAsIdent(2);
- IdentifierInfo *ValueInfo = ValueLoc ? ValueLoc->Ident : nullptr;
+ IdentifierLoc *StateLoc = A.getArgAsIdent(2);
Expr *ValueExpr = A.getArgAsExpr(3);
- assert(OptionInfo && "Attribute must have valid option info.");
-
+ bool PragmaUnroll = PragmaNameLoc->Ident->getName() == "unroll";
+ bool PragmaNoUnroll = PragmaNameLoc->Ident->getName() == "nounroll";
if (St->getStmtClass() != Stmt::DoStmtClass &&
St->getStmtClass() != Stmt::ForStmtClass &&
St->getStmtClass() != Stmt::CXXForRangeStmtClass &&
St->getStmtClass() != Stmt::WhileStmtClass) {
- const char *Pragma = PragmaNameLoc->Ident->getName() == "unroll"
- ? "#pragma unroll"
- : "#pragma clang loop";
+ const char *Pragma =
+ llvm::StringSwitch<const char *>(PragmaNameLoc->Ident->getName())
+ .Case("unroll", "#pragma unroll")
+ .Case("nounroll", "#pragma nounroll")
+ .Default("#pragma clang loop");
S.Diag(St->getLocStart(), diag::err_pragma_loop_precedes_nonloop) << Pragma;
return nullptr;
}
LoopHintAttr::OptionType Option;
LoopHintAttr::Spelling Spelling;
- if (PragmaNameLoc->Ident->getName() == "unroll") {
- Option = ValueLoc ? LoopHintAttr::UnrollCount : LoopHintAttr::Unroll;
+ if (PragmaUnroll) {
+ Option = ValueExpr ? LoopHintAttr::UnrollCount : LoopHintAttr::Unroll;
Spelling = LoopHintAttr::Pragma_unroll;
+ } else if (PragmaNoUnroll) {
+ Option = LoopHintAttr::Unroll;
+ Spelling = LoopHintAttr::Pragma_nounroll;
} else {
+ assert(OptionLoc && OptionLoc->Ident &&
+ "Attribute must have valid option info.");
+ IdentifierInfo *OptionInfo = OptionLoc->Ident;
Option = llvm::StringSwitch<LoopHintAttr::OptionType>(OptionInfo->getName())
.Case("vectorize", LoopHintAttr::Vectorize)
.Case("vectorize_width", LoopHintAttr::VectorizeWidth)
@@ -82,53 +88,45 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const AttributeList &A,
Spelling = LoopHintAttr::Pragma_clang_loop;
}
- int ValueInt;
- if (Option == LoopHintAttr::Unroll &&
- Spelling == LoopHintAttr::Pragma_unroll) {
- ValueInt = 1;
- } else if (Option == LoopHintAttr::Vectorize ||
- Option == LoopHintAttr::Interleave ||
- Option == LoopHintAttr::Unroll) {
- if (!ValueInfo) {
- S.Diag(ValueLoc->Loc, diag::err_pragma_loop_invalid_keyword);
- return nullptr;
- }
- if (ValueInfo->isStr("disable"))
- ValueInt = 0;
- else if (ValueInfo->isStr("enable"))
- ValueInt = 1;
- else {
- S.Diag(ValueLoc->Loc, diag::err_pragma_loop_invalid_keyword);
- return nullptr;
- }
+ LoopHintAttr::LoopHintState State = LoopHintAttr::Default;
+ if (PragmaNoUnroll) {
+ State = LoopHintAttr::Disable;
} else if (Option == LoopHintAttr::VectorizeWidth ||
Option == LoopHintAttr::InterleaveCount ||
Option == LoopHintAttr::UnrollCount) {
- // FIXME: We should support template parameters for the loop hint value.
- // See bug report #19610.
- llvm::APSInt ValueAPS;
- if (!ValueExpr || !ValueExpr->isIntegerConstantExpr(ValueAPS, S.Context) ||
- (ValueInt = ValueAPS.getSExtValue()) < 1) {
- S.Diag(ValueLoc->Loc, diag::err_pragma_loop_invalid_value);
+ assert(ValueExpr && "Attribute must have a valid value expression.");
+ if (S.CheckLoopHintExpr(ValueExpr, St->getLocStart()))
return nullptr;
+ } else if (Option == LoopHintAttr::Vectorize ||
+ Option == LoopHintAttr::Interleave ||
+ Option == LoopHintAttr::Unroll) {
+ // Default state is assumed if StateLoc is not specified, such as with
+ // '#pragma unroll'.
+ if (StateLoc && StateLoc->Ident) {
+ if (StateLoc->Ident->isStr("disable"))
+ State = LoopHintAttr::Disable;
+ else
+ State = LoopHintAttr::Enable;
}
- } else
- llvm_unreachable("Unknown loop hint option");
+ }
- return LoopHintAttr::CreateImplicit(S.Context, Spelling, Option, ValueInt,
- A.getRange());
+ return LoopHintAttr::CreateImplicit(S.Context, Spelling, Option, State,
+ ValueExpr, A.getRange());
}
-static void CheckForIncompatibleAttributes(
- Sema &S, const SmallVectorImpl<const Attr *> &Attrs) {
- // There are 3 categories of loop hints: vectorize, interleave, and
- // unroll. Each comes in two variants: an enable/disable form and a
- // form which takes a numeric argument. For example:
- // unroll(enable|disable) and unroll_count(N). The following array
- // accumulate the hints encountered while iterating through the
- // attributes to check for compatibility.
+static void
+CheckForIncompatibleAttributes(Sema &S,
+ const SmallVectorImpl<const Attr *> &Attrs) {
+ // There are 3 categories of loop hints attributes: vectorize, interleave,
+ // and unroll. Each comes in two variants: a state form and a numeric form.
+ // The state form selectively defaults/enables/disables the transformation
+ // for the loop (for unroll, default indicates full unrolling rather than
+  // enabling the transformation). The numeric form provides an integer
+ // hint (for example, unroll count) to the transformer. The following array
+ // accumulates the hints encountered while iterating through the attributes
+ // to check for compatibility.
struct {
- const LoopHintAttr *EnableAttr;
+ const LoopHintAttr *StateAttr;
const LoopHintAttr *NumericAttr;
} HintAttrs[] = {{nullptr, nullptr}, {nullptr, nullptr}, {nullptr, nullptr}};
@@ -141,49 +139,53 @@ static void CheckForIncompatibleAttributes(
int Option = LH->getOption();
int Category;
+ enum { Vectorize, Interleave, Unroll };
switch (Option) {
case LoopHintAttr::Vectorize:
case LoopHintAttr::VectorizeWidth:
- Category = 0;
+ Category = Vectorize;
break;
case LoopHintAttr::Interleave:
case LoopHintAttr::InterleaveCount:
- Category = 1;
+ Category = Interleave;
break;
case LoopHintAttr::Unroll:
case LoopHintAttr::UnrollCount:
- Category = 2;
+ Category = Unroll;
break;
};
auto &CategoryState = HintAttrs[Category];
- SourceLocation OptionLoc = LH->getRange().getBegin();
const LoopHintAttr *PrevAttr;
if (Option == LoopHintAttr::Vectorize ||
Option == LoopHintAttr::Interleave || Option == LoopHintAttr::Unroll) {
// Enable|disable hint. For example, vectorize(enable).
- PrevAttr = CategoryState.EnableAttr;
- CategoryState.EnableAttr = LH;
+ PrevAttr = CategoryState.StateAttr;
+ CategoryState.StateAttr = LH;
} else {
// Numeric hint. For example, vectorize_width(8).
PrevAttr = CategoryState.NumericAttr;
CategoryState.NumericAttr = LH;
}
+ PrintingPolicy Policy(S.Context.getLangOpts());
+ SourceLocation OptionLoc = LH->getRange().getBegin();
if (PrevAttr)
// Cannot specify same type of attribute twice.
S.Diag(OptionLoc, diag::err_pragma_loop_compatibility)
- << /*Duplicate=*/true << PrevAttr->getDiagnosticName()
- << LH->getDiagnosticName();
-
- if (CategoryState.EnableAttr && !CategoryState.EnableAttr->getValue() &&
- CategoryState.NumericAttr) {
- // Disable hints are not compatible with numeric hints of the
- // same category.
+ << /*Duplicate=*/true << PrevAttr->getDiagnosticName(Policy)
+ << LH->getDiagnosticName(Policy);
+
+ if (CategoryState.StateAttr && CategoryState.NumericAttr &&
+ (Category == Unroll ||
+ CategoryState.StateAttr->getState() == LoopHintAttr::Disable)) {
+ // Disable hints are not compatible with numeric hints of the same
+ // category. As a special case, numeric unroll hints are also not
+ // compatible with "enable" form of the unroll pragma, unroll(full).
S.Diag(OptionLoc, diag::err_pragma_loop_compatibility)
<< /*Duplicate=*/false
- << CategoryState.EnableAttr->getDiagnosticName()
- << CategoryState.NumericAttr->getDiagnosticName();
+ << CategoryState.StateAttr->getDiagnosticName(Policy)
+ << CategoryState.NumericAttr->getDiagnosticName(Policy);
}
}
}
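
Illustrative sketch (not part of the upstream change) showing the pragma forms the reworked handleLoopHintAttr() now distinguishes: a state form ('#pragma nounroll') and a numeric form that keeps the value as an expression.

void scale(int *a, int n) {
  // State form: maps to LoopHintAttr::Unroll with the Disable state and the
  // Pragma_nounroll spelling.
#pragma nounroll
  for (int i = 0; i < n; ++i)
    a[i] += 1;

  // Numeric form: unroll_count(4) is kept as a value expression and checked
  // via CheckLoopHintExpr().
#pragma clang loop unroll_count(4)
  for (int i = 0; i < n; ++i)
    a[i] += 2;

  // Combining a numeric unroll hint with the state form of '#pragma unroll'
  // on the same loop is rejected by CheckForIncompatibleAttributes().
}
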
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index 7e8f0b721dfe..67c36a5fb5e9 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -107,7 +107,7 @@ void Sema::FilterAcceptableTemplateNames(LookupResult &R,
// template itself and not a specialization thereof, and is not
// ambiguous.
if (ClassTemplateDecl *ClassTmpl = dyn_cast<ClassTemplateDecl>(Repl))
- if (!ClassTemplates.insert(ClassTmpl)) {
+ if (!ClassTemplates.insert(ClassTmpl).second) {
filter.erase();
continue;
}
@@ -318,15 +318,14 @@ void Sema::LookupTemplateName(LookupResult &Found,
DeclarationName Name = Found.getLookupName();
Found.clear();
// Simple filter callback that, for keywords, only accepts the C++ *_cast
- CorrectionCandidateCallback FilterCCC;
- FilterCCC.WantTypeSpecifiers = false;
- FilterCCC.WantExpressionKeywords = false;
- FilterCCC.WantRemainingKeywords = false;
- FilterCCC.WantCXXNamedCasts = true;
- if (TypoCorrection Corrected = CorrectTypo(Found.getLookupNameInfo(),
- Found.getLookupKind(), S, &SS,
- FilterCCC, CTK_ErrorRecovery,
- LookupCtx)) {
+ auto FilterCCC = llvm::make_unique<CorrectionCandidateCallback>();
+ FilterCCC->WantTypeSpecifiers = false;
+ FilterCCC->WantExpressionKeywords = false;
+ FilterCCC->WantRemainingKeywords = false;
+ FilterCCC->WantCXXNamedCasts = true;
+ if (TypoCorrection Corrected = CorrectTypo(
+ Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS,
+ std::move(FilterCCC), CTK_ErrorRecovery, LookupCtx)) {
Found.setLookupName(Corrected.getCorrection());
if (Corrected.getCorrectionDecl())
Found.addDecl(Corrected.getCorrectionDecl());
@@ -652,12 +651,8 @@ Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) {
// A non-type template-parameter of type "array of T" or
// "function returning T" is adjusted to be of type "pointer to
// T" or "pointer to function returning T", respectively.
- else if (T->isArrayType())
- // FIXME: Keep the type prior to promotion?
- return Context.getArrayDecayedType(T);
- else if (T->isFunctionType())
- // FIXME: Keep the type prior to promotion?
- return Context.getPointerType(T);
+ else if (T->isArrayType() || T->isFunctionType())
+ return Context.getDecayedType(T);
Diag(Loc, diag::err_template_nontype_parm_bad_type)
<< T;
@@ -720,7 +715,8 @@ Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
return Param;
TemplateArgument Converted;
- ExprResult DefaultRes = CheckTemplateArgument(Param, Param->getType(), Default, Converted);
+ ExprResult DefaultRes =
+ CheckTemplateArgument(Param, Param->getType(), Default, Converted);
if (DefaultRes.isInvalid()) {
Param->setInvalidDecl();
return Param;
@@ -1105,9 +1101,13 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
AddPushedVisibilityAttribute(NewClass);
- if (TUK != TUK_Friend)
- PushOnScopeChains(NewTemplate, S);
- else {
+ if (TUK != TUK_Friend) {
+ // Per C++ [basic.scope.temp]p2, skip the template parameter scopes.
+ Scope *Outer = S;
+ while ((Outer->getFlags() & Scope::TemplateParamScope) != 0)
+ Outer = Outer->getParent();
+ PushOnScopeChains(NewTemplate, Outer);
+ } else {
if (PrevClassTemplate && PrevClassTemplate->getAccess() != AS_none) {
NewTemplate->setAccess(PrevClassTemplate->getAccess());
NewClass->setAccess(PrevClassTemplate->getAccess());
@@ -2396,7 +2396,7 @@ makeTemplateArgumentListInfo(Sema &S, TemplateIdAnnotation &TemplateId) {
DeclResult Sema::ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc,
- TemplateParameterList *TemplateParams, VarDecl::StorageClass SC,
+ TemplateParameterList *TemplateParams, StorageClass SC,
bool IsPartialSpecialization) {
// D must be variable template id.
assert(D.getName().getKind() == UnqualifiedId::IK_TemplateId &&
@@ -4106,6 +4106,7 @@ bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
return false;
case NestedNameSpecifier::TypeSpec:
@@ -4595,8 +4596,8 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
return true;
// Create the template argument.
- Converted = TemplateArgument(cast<ValueDecl>(Entity->getCanonicalDecl()),
- ParamType->isReferenceType());
+ Converted =
+ TemplateArgument(cast<ValueDecl>(Entity->getCanonicalDecl()), ParamType);
S.MarkAnyDeclReferenced(Arg->getLocStart(), Entity, false);
return false;
}
@@ -4691,7 +4692,7 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
Converted = TemplateArgument(Arg);
} else {
VD = cast<ValueDecl>(VD->getCanonicalDecl());
- Converted = TemplateArgument(VD, /*isReferenceParam*/false);
+ Converted = TemplateArgument(VD, ParamType);
}
return Invalid;
}
@@ -4720,7 +4721,7 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
Converted = TemplateArgument(Arg);
} else {
ValueDecl *D = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
- Converted = TemplateArgument(D, /*isReferenceParam*/false);
+ Converted = TemplateArgument(D, ParamType);
}
return Invalid;
}
@@ -4738,30 +4739,152 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
///
/// This routine implements the semantics of C++ [temp.arg.nontype].
/// If an error occurred, it returns ExprError(); otherwise, it
-/// returns the converted template argument. \p
-/// InstantiatedParamType is the type of the non-type template
-/// parameter after it has been instantiated.
+/// returns the converted template argument. \p ParamType is the
+/// type of the non-type template parameter after it has been instantiated.
ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
- QualType InstantiatedParamType, Expr *Arg,
+ QualType ParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK) {
SourceLocation StartLoc = Arg->getLocStart();
// If either the parameter has a dependent type or the argument is
// type-dependent, there's nothing we can check now.
- if (InstantiatedParamType->isDependentType() || Arg->isTypeDependent()) {
+ if (ParamType->isDependentType() || Arg->isTypeDependent()) {
// FIXME: Produce a cloned, canonical expression?
Converted = TemplateArgument(Arg);
return Arg;
}
+ // We should have already dropped all cv-qualifiers by now.
+ assert(!ParamType.hasQualifiers() &&
+ "non-type template parameter type cannot be qualified");
+
+ if (CTAK == CTAK_Deduced &&
+ !Context.hasSameUnqualifiedType(ParamType, Arg->getType())) {
+ // C++ [temp.deduct.type]p17:
+ // If, in the declaration of a function template with a non-type
+ // template-parameter, the non-type template-parameter is used
+ // in an expression in the function parameter-list and, if the
+ // corresponding template-argument is deduced, the
+ // template-argument type shall match the type of the
+ // template-parameter exactly, except that a template-argument
+ // deduced from an array bound may be of any integral type.
+ Diag(StartLoc, diag::err_deduced_non_type_template_arg_type_mismatch)
+ << Arg->getType().getUnqualifiedType()
+ << ParamType.getUnqualifiedType();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+ }
+
+ if (getLangOpts().CPlusPlus1z) {
+ // FIXME: We can do some limited checking for a value-dependent but not
+ // type-dependent argument.
+ if (Arg->isValueDependent()) {
+ Converted = TemplateArgument(Arg);
+ return Arg;
+ }
+
+ // C++1z [temp.arg.nontype]p1:
+ // A template-argument for a non-type template parameter shall be
+ // a converted constant expression of the type of the template-parameter.
+ APValue Value;
+ ExprResult ArgResult = CheckConvertedConstantExpression(
+ Arg, ParamType, Value, CCEK_TemplateArg);
+ if (ArgResult.isInvalid())
+ return ExprError();
+
+ QualType CanonParamType = Context.getCanonicalType(ParamType);
+
+ // Convert the APValue to a TemplateArgument.
+ switch (Value.getKind()) {
+ case APValue::Uninitialized:
+ assert(ParamType->isNullPtrType());
+ Converted = TemplateArgument(CanonParamType, /*isNullPtr*/true);
+ break;
+ case APValue::Int:
+ assert(ParamType->isIntegralOrEnumerationType());
+ Converted = TemplateArgument(Context, Value.getInt(), CanonParamType);
+ break;
+ case APValue::MemberPointer: {
+ assert(ParamType->isMemberPointerType());
+
+ // FIXME: We need TemplateArgument representation and mangling for these.
+ if (!Value.getMemberPointerPath().empty()) {
+ Diag(Arg->getLocStart(),
+ diag::err_template_arg_member_ptr_base_derived_not_supported)
+ << Value.getMemberPointerDecl() << ParamType
+ << Arg->getSourceRange();
+ return ExprError();
+ }
+
+ auto *VD = const_cast<ValueDecl*>(Value.getMemberPointerDecl());
+ Converted = VD ? TemplateArgument(VD, CanonParamType)
+ : TemplateArgument(CanonParamType, /*isNullPtr*/true);
+ break;
+ }
+ case APValue::LValue: {
+ // For a non-type template-parameter of pointer or reference type,
+ // the value of the constant expression shall not refer to
+ assert(ParamType->isPointerType() || ParamType->isReferenceType() ||
+ ParamType->isNullPtrType());
+ // -- a temporary object
+ // -- a string literal
+ // -- the result of a typeid expression, or
+      // -- a predefined __func__ variable
+ if (auto *E = Value.getLValueBase().dyn_cast<const Expr*>()) {
+ if (isa<CXXUuidofExpr>(E)) {
+ Converted = TemplateArgument(const_cast<Expr*>(E));
+ break;
+ }
+ Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
+ << Arg->getSourceRange();
+ return ExprError();
+ }
+ auto *VD = const_cast<ValueDecl *>(
+ Value.getLValueBase().dyn_cast<const ValueDecl *>());
+ // -- a subobject
+ if (Value.hasLValuePath() && Value.getLValuePath().size() == 1 &&
+ VD && VD->getType()->isArrayType() &&
+ Value.getLValuePath()[0].ArrayIndex == 0 &&
+ !Value.isLValueOnePastTheEnd() && ParamType->isPointerType()) {
+ // Per defect report (no number yet):
+ // ... other than a pointer to the first element of a complete array
+ // object.
+ } else if (!Value.hasLValuePath() || Value.getLValuePath().size() ||
+ Value.isLValueOnePastTheEnd()) {
+ Diag(StartLoc, diag::err_non_type_template_arg_subobject)
+ << Value.getAsString(Context, ParamType);
+ return ExprError();
+ }
+ assert((VD || !ParamType->isReferenceType()) &&
+ "null reference should not be a constant expression");
+ assert((!VD || !ParamType->isNullPtrType()) &&
+ "non-null value of type nullptr_t?");
+ Converted = VD ? TemplateArgument(VD, CanonParamType)
+ : TemplateArgument(CanonParamType, /*isNullPtr*/true);
+ break;
+ }
+ case APValue::AddrLabelDiff:
+ return Diag(StartLoc, diag::err_non_type_template_arg_addr_label_diff);
+ case APValue::Float:
+ case APValue::ComplexInt:
+ case APValue::ComplexFloat:
+ case APValue::Vector:
+ case APValue::Array:
+ case APValue::Struct:
+ case APValue::Union:
+ llvm_unreachable("invalid kind for template argument");
+ }
+
+ return ArgResult.get();
+ }
+
// C++ [temp.arg.nontype]p5:
// The following conversions are performed on each expression used
// as a non-type template-argument. If a non-type
// template-argument cannot be converted to the type of the
// corresponding template-parameter then the program is
// ill-formed.
- QualType ParamType = InstantiatedParamType;
if (ParamType->isIntegralOrEnumerationType()) {
// C++11:
// -- for a non-type template-parameter of integral or
@@ -4773,23 +4896,6 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// enumeration type, integral promotions (4.5) and integral
// conversions (4.7) are applied.
- if (CTAK == CTAK_Deduced &&
- !Context.hasSameUnqualifiedType(ParamType, Arg->getType())) {
- // C++ [temp.deduct.type]p17:
- // If, in the declaration of a function template with a non-type
- // template-parameter, the non-type template-parameter is used
- // in an expression in the function parameter-list and, if the
- // corresponding template-argument is deduced, the
- // template-argument type shall match the type of the
- // template-parameter exactly, except that a template-argument
- // deduced from an array bound may be of any integral type.
- Diag(StartLoc, diag::err_deduced_non_type_template_arg_type_mismatch)
- << Arg->getType().getUnqualifiedType()
- << ParamType.getUnqualifiedType();
- Diag(Param->getLocation(), diag::note_template_param_here);
- return ExprError();
- }
-
if (getLangOpts().CPlusPlus11) {
// We can't check arbitrary value-dependent arguments.
// FIXME: If there's no viable conversion to the template parameter type,
@@ -4867,9 +4973,8 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
return ExprError();
}
- // From here on out, all we care about are the unqualified forms
- // of the parameter and argument types.
- ParamType = ParamType.getUnqualifiedType();
+ // From here on out, all we care about is the unqualified form
+ // of the argument type.
ArgType = ArgType.getUnqualifiedType();
// Try to convert the argument to the parameter's type.
@@ -4886,7 +4991,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// We can't perform this conversion.
Diag(Arg->getLocStart(),
diag::err_template_arg_not_convertible)
- << Arg->getType() << InstantiatedParamType << Arg->getSourceRange();
+ << Arg->getType() << ParamType << Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
}
@@ -6329,14 +6434,11 @@ Decl *Sema::ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
/// \brief Strips various properties off an implicit instantiation
/// that has just been explicitly specialized.
static void StripImplicitInstantiation(NamedDecl *D) {
- D->dropAttrs();
+ D->dropAttr<DLLImportAttr>();
+ D->dropAttr<DLLExportAttr>();
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
FD->setInlineSpecified(false);
-
- for (auto I : FD->params())
- I->dropAttrs();
- }
}
/// \brief Compute the diagnostic location for an explicit instantiation
@@ -7606,6 +7708,29 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// Ignore access control bits, we don't need them for redeclaration checking.
FunctionDecl *Specialization = cast<FunctionDecl>(*Result);
+ // C++11 [except.spec]p4
+ // In an explicit instantiation an exception-specification may be specified,
+ // but is not required.
+ // If an exception-specification is specified in an explicit instantiation
+ // directive, it shall be compatible with the exception-specifications of
+ // other declarations of that function.
+ if (auto *FPT = R->getAs<FunctionProtoType>())
+ if (FPT->hasExceptionSpec()) {
+ unsigned DiagID =
+ diag::err_mismatched_exception_spec_explicit_instantiation;
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::ext_mismatched_exception_spec_explicit_instantiation;
+ bool Result = CheckEquivalentExceptionSpec(
+ PDiag(DiagID) << Specialization->getType(),
+ PDiag(diag::note_explicit_instantiation_here),
+ Specialization->getType()->getAs<FunctionProtoType>(),
+ Specialization->getLocation(), FPT, D.getLocStart());
+ // In Microsoft mode, mismatching exception specifications just cause a
+ // warning.
+ if (!getLangOpts().MicrosoftExt && Result)
+ return true;
+ }
+
if (Specialization->getTemplateSpecializationKind() == TSK_Undeclared) {
Diag(D.getIdentifierLoc(),
diag::err_explicit_instantiation_member_function_not_instantiated)
@@ -7879,7 +8004,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
DeclarationName Name(&II);
LookupResult Result(*this, Name, IILoc, LookupOrdinaryName);
- LookupQualifiedName(Result, Ctx);
+ LookupQualifiedName(Result, Ctx, SS);
unsigned DiagID = 0;
Decl *Referenced = nullptr;
switch (Result.getResultKind()) {
@@ -7924,6 +8049,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) {
// We found a type. Build an ElaboratedType, since the
// typename-specifier was just sugar.
+ MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
return Context.getElaboratedType(ETK_Typename,
QualifierLoc.getNestedNameSpecifier(),
Context.getTypeDeclType(Type));
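For illustration only, not part of the patch: the C++11 [except.spec]p4 rule enforced by the new check in ActOnExplicitInstantiation above. The name f is hypothetical; a minimal sketch of what is and is not accepted:

template <typename T> void f(T) noexcept {}

template void f<int>(int) noexcept;  // OK: matches the template's exception-specification
template void f<long>(long);         // OK: the exception-specification may be omitted
// template void f<char>(char) noexcept(false);
//   would be diagnosed with err_mismatched_exception_spec_explicit_instantiation
//   (downgraded to the ext_ warning under Microsoft extensions)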
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index 53a75d227c7b..dd2a4d26979e 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -262,8 +262,7 @@ checkDeducedTemplateArguments(ASTContext &Context,
// If we deduced two declarations, make sure they refer to the
// same declaration.
if (Y.getKind() == TemplateArgument::Declaration &&
- isSameDeclaration(X.getAsDecl(), Y.getAsDecl()) &&
- X.isDeclForReferenceParam() == Y.isDeclForReferenceParam())
+ isSameDeclaration(X.getAsDecl(), Y.getAsDecl()))
return X;
// All other combinations are incompatible.
@@ -384,7 +383,7 @@ DeduceNonTypeTemplateArgument(Sema &S,
"Cannot deduce non-type template argument with depth > 0");
D = D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
- TemplateArgument New(D, NTTP->getType()->isReferenceType());
+ TemplateArgument New(D, NTTP->getType());
DeducedTemplateArgument NewDeduced(New);
DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
Deduced[NTTP->getIndex()],
@@ -1302,7 +1301,8 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// T &
case Type::LValueReference: {
- const LValueReferenceType *ReferenceArg = Arg->getAs<LValueReferenceType>();
+ const LValueReferenceType *ReferenceArg =
+ Arg->getAs<LValueReferenceType>();
if (!ReferenceArg)
return Sema::TDK_NonDeducedMismatch;
@@ -1313,7 +1313,8 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// T && [C++0x]
case Type::RValueReference: {
- const RValueReferenceType *ReferenceArg = Arg->getAs<RValueReferenceType>();
+ const RValueReferenceType *ReferenceArg =
+ Arg->getAs<RValueReferenceType>();
if (!ReferenceArg)
return Sema::TDK_NonDeducedMismatch;
@@ -1492,7 +1493,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
const RecordType *NextT = ToVisit.pop_back_val();
// If we have already seen this type, skip it.
- if (!Visited.insert(NextT))
+ if (!Visited.insert(NextT).second)
continue;
// If this is a base class, try to perform template argument
@@ -1726,8 +1727,7 @@ DeduceTemplateArguments(Sema &S,
case TemplateArgument::Declaration:
if (Arg.getKind() == TemplateArgument::Declaration &&
- isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl()) &&
- Param.isDeclForReferenceParam() == Arg.isDeclForReferenceParam())
+ isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl()))
return Sema::TDK_Success;
Info.FirstArg = Param;
@@ -1962,8 +1962,7 @@ static bool isSameTemplateArg(ASTContext &Context,
Context.getCanonicalType(Y.getAsType());
case TemplateArgument::Declaration:
- return isSameDeclaration(X.getAsDecl(), Y.getAsDecl()) &&
- X.isDeclForReferenceParam() == Y.isDeclForReferenceParam();
+ return isSameDeclaration(X.getAsDecl(), Y.getAsDecl());
case TemplateArgument::NullPtr:
return Context.hasSameType(X.getNullPtrType(), Y.getNullPtrType());
@@ -2056,7 +2055,8 @@ getTrivialTemplateArgumentLoc(Sema &S,
TemplateName Template = Arg.getAsTemplate();
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
Builder.MakeTrivial(S.Context, DTN->getQualifier(), Loc);
- else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ else if (QualifiedTemplateName *QTN =
+ Template.getAsQualifiedTemplateName())
Builder.MakeTrivial(S.Context, QTN->getQualifier(), Loc);
if (Arg.getKind() == TemplateArgument::Template)
@@ -2588,6 +2588,9 @@ Sema::SubstituteExplicitTemplateArguments(
= Function->getType()->getAs<FunctionProtoType>();
assert(Proto && "Function template does not have a prototype?");
+ // Isolate our substituted parameters from our caller.
+ LocalInstantiationScope InstScope(*this, /*MergeWithOuterScope*/true);
+
// Instantiate the types of each of the function parameters given the
// explicitly-specified template arguments. If the function has a trailing
// return type, substitute it after the arguments to ensure we substitute
@@ -3000,7 +3003,7 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
static QualType GetTypeOfFunction(Sema &S, const OverloadExpr::FindResult &R,
FunctionDecl *Fn) {
// We may need to deduce the return type of the function now.
- if (S.getLangOpts().CPlusPlus1y && Fn->getReturnType()->isUndeducedType() &&
+ if (S.getLangOpts().CPlusPlus14 && Fn->getReturnType()->isUndeducedType() &&
S.DeduceReturnType(Fn, R.Expression->getExprLoc(), /*Diagnose*/ false))
return QualType();
@@ -3229,9 +3232,9 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(Sema &S,
return false;
}
-static bool hasDeducibleTemplateParameters(Sema &S,
- FunctionTemplateDecl *FunctionTemplate,
- QualType T);
+static bool
+hasDeducibleTemplateParameters(Sema &S, FunctionTemplateDecl *FunctionTemplate,
+ QualType T);
/// \brief Perform template argument deduction by matching a parameter type
/// against a single expression, where the expression is an element of
@@ -3488,8 +3491,8 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
}
return FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
- NumExplicitlySpecified,
- Specialization, Info, &OriginalCallArgs);
+ NumExplicitlySpecified, Specialization,
+ Info, &OriginalCallArgs);
}
QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType,
@@ -3579,7 +3582,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
// If the function has a deduced return type, substitute it for a dependent
// type so that we treat it as a non-deduced context in what follows.
bool HasDeducedReturnType = false;
- if (getLangOpts().CPlusPlus1y && InOverloadResolution &&
+ if (getLangOpts().CPlusPlus14 && InOverloadResolution &&
Function->getReturnType()->getContainedAutoType()) {
FunctionType = SubstAutoType(FunctionType, Context.DependentTy);
HasDeducedReturnType = true;
@@ -3788,7 +3791,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
// C++0x [temp.deduct.conv]p4:
// If A is a cv-qualified type, the top level cv-qualifiers of A's
- // type are ignored for type deduction. If A is a reference type, the type
+ // type are ignored for type deduction. If A is a reference type, the type
// referred to by A is used for type deduction.
A = A.getUnqualifiedType();
}
@@ -3842,8 +3845,8 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
Specialization = cast_or_null<CXXConversionDecl>(ConversionSpecialized);
// If the conversion operator is being invoked on a lambda closure to convert
- // to a ptr-to-function, use the deduced arguments from the conversion function
- // to specialize the corresponding call operator.
+ // to a ptr-to-function, use the deduced arguments from the conversion
+ // function to specialize the corresponding call operator.
// e.g., int (*fp)(int) = [](auto a) { return a; };
if (Result == TDK_Success && isLambdaConversionOperator(ConversionGeneric)) {
@@ -3907,9 +3910,10 @@ namespace {
public TreeTransform<SubstituteAutoTransform> {
QualType Replacement;
public:
- SubstituteAutoTransform(Sema &SemaRef, QualType Replacement) :
- TreeTransform<SubstituteAutoTransform>(SemaRef), Replacement(Replacement) {
- }
+ SubstituteAutoTransform(Sema &SemaRef, QualType Replacement)
+ : TreeTransform<SubstituteAutoTransform>(SemaRef),
+ Replacement(Replacement) {}
+
QualType TransformAutoType(TypeLocBuilder &TLB, AutoTypeLoc TL) {
// If we're building the type pattern to deduce against, don't wrap the
// substituted type in an AutoType. Certain template deduction rules
@@ -3988,7 +3992,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
return DAR_FailedAlreadyDiagnosed;
}
- QualType Deduced = BuildDecltypeType(Init, Init->getLocStart());
+ QualType Deduced = BuildDecltypeType(Init, Init->getLocStart(), false);
// FIXME: Support a non-canonical deduced type for 'auto'.
Deduced = Context.getCanonicalType(Deduced);
Result = SubstituteAutoTransform(*this, Deduced).Apply(Type);
@@ -4885,8 +4889,8 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
Depth, Used);
// C++0x [temp.deduct.type]p9:
- // If the template argument list of P contains a pack expansion that is not
- // the last template argument, the entire template argument list is a
+ // If the template argument list of P contains a pack expansion that is
+ // not the last template argument, the entire template argument list is a
// non-deduced context.
if (OnlyDeduced &&
hasPackExpansionBeforeEnd(Spec->getArgs(), Spec->getNumArgs()))
@@ -5069,10 +5073,9 @@ Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
/// \brief Marks all of the template parameters that will be deduced by a
/// call to the given function template.
-void
-Sema::MarkDeducedTemplateParameters(ASTContext &Ctx,
- const FunctionTemplateDecl *FunctionTemplate,
- llvm::SmallBitVector &Deduced) {
+void Sema::MarkDeducedTemplateParameters(
+ ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate,
+ llvm::SmallBitVector &Deduced) {
TemplateParameterList *TemplateParams
= FunctionTemplate->getTemplateParameters();
Deduced.clear();
diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp
index 14c64050168a..6ac7175cf30f 100644
--- a/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/lib/Sema/SemaTemplateInstantiate.cpp
@@ -448,6 +448,10 @@ void Sema::PrintInstantiationStack() {
diag::note_template_enum_def_here)
<< ED
<< Active->InstantiationRange;
+ } else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_nsdmi_here)
+ << FD << Active->InstantiationRange;
} else {
Diags.Report(Active->PointOfInstantiation,
diag::note_template_type_alias_instantiation_here)
@@ -767,6 +771,8 @@ namespace {
QualType ObjectType = QualType(),
NamedDecl *FirstQualifierInScope = nullptr);
+ const LoopHintAttr *TransformLoopHintAttr(const LoopHintAttr *LH);
+
ExprResult TransformPredefinedExpr(PredefinedExpr *E);
ExprResult TransformDeclRefExpr(DeclRefExpr *E);
ExprResult TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E);
@@ -789,11 +795,17 @@ namespace {
ExprResult TransformFunctionParmPackExpr(FunctionParmPackExpr *E);
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
- FunctionProtoTypeLoc TL);
+ FunctionProtoTypeLoc TL) {
+ // Call the base version; it will forward to our overridden version below.
+ return inherited::TransformFunctionProtoType(TLB, TL);
+ }
+
+ template<typename Fn>
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals);
+ unsigned ThisTypeQuals,
+ Fn TransformExceptionSpec);
ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
int indexAdjustment,
@@ -1023,7 +1035,7 @@ TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc,
TemplateName TemplateInstantiator::TransformTemplateName(CXXScopeSpec &SS,
TemplateName Name,
- SourceLocation NameLoc,
+ SourceLocation NameLoc,
QualType ObjectType,
NamedDecl *FirstQualifierInScope) {
if (TemplateTemplateParmDecl *TTP
@@ -1127,6 +1139,24 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
return transformNonTypeTemplateParmRef(NTTP, E->getLocation(), Arg);
}
+const LoopHintAttr *
+TemplateInstantiator::TransformLoopHintAttr(const LoopHintAttr *LH) {
+ Expr *TransformedExpr = getDerived().TransformExpr(LH->getValue()).get();
+
+ if (TransformedExpr == LH->getValue())
+ return LH;
+
+ // Generate an error if there is a problem with the value.
+ if (getSema().CheckLoopHintExpr(TransformedExpr, LH->getLocation()))
+ return LH;
+
+ // Create a new LoopHintAttr with the integral expression in place of the
+ // non-type template parameter.
+ return LoopHintAttr::CreateImplicit(
+ getSema().Context, LH->getSemanticSpelling(), LH->getOption(),
+ LH->getState(), TransformedExpr, LH->getRange());
+}
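For illustration only, not part of the patch: the kind of dependent loop hint that TransformLoopHintAttr above rewrites. The name copy_block is hypothetical; a minimal sketch:

template <int Factor>
void copy_block(int *dst, const int *src, int n) {
#pragma clang loop unroll_count(Factor)  // value refers to a non-type template parameter
  for (int i = 0; i < n; ++i)
    dst[i] = src[i];
}

template void copy_block<8>(int *, const int *, int);  // hint instantiated with the value 8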
+
ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
NonTypeTemplateParmDecl *parm,
SourceLocation loc,
@@ -1255,8 +1285,8 @@ TemplateInstantiator::TransformFunctionParmPackRefExpr(DeclRefExpr *E,
Decl *TransformedDecl;
if (DeclArgumentPack *Pack = Found->dyn_cast<DeclArgumentPack *>()) {
- // If this is a reference to a function parameter pack which we can substitute
- // but can't yet expand, build a FunctionParmPackExpr for it.
+ // If this is a reference to a function parameter pack which we can
+ // substitute but can't yet expand, build a FunctionParmPackExpr for it.
if (getSema().ArgumentPackSubstitutionIndex == -1) {
QualType T = TransformType(E->getType());
if (T.isNull())
@@ -1307,21 +1337,16 @@ ExprResult TemplateInstantiator::TransformCXXDefaultArgExpr(
E->getParam());
}
-QualType TemplateInstantiator::TransformFunctionProtoType(TypeLocBuilder &TLB,
- FunctionProtoTypeLoc TL) {
- // We need a local instantiation scope for this function prototype.
- LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
- return inherited::TransformFunctionProtoType(TLB, TL);
-}
-
+template<typename Fn>
QualType TemplateInstantiator::TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals) {
+ unsigned ThisTypeQuals,
+ Fn TransformExceptionSpec) {
// We need a local instantiation scope for this function prototype.
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
- return inherited::TransformFunctionProtoType(TLB, TL, ThisContext,
- ThisTypeQuals);
+ return inherited::TransformFunctionProtoType(
+ TLB, TL, ThisContext, ThisTypeQuals, TransformExceptionSpec);
}
ParmVarDecl *
@@ -1556,7 +1581,8 @@ static bool NeedsInstantiationAsFunctionType(TypeSourceInfo *T) {
/// A form of SubstType intended specifically for instantiating the
/// type of a FunctionDecl. Its purpose is solely to force the
-/// instantiation of default-argument expressions.
+/// instantiation of default-argument expressions and to avoid
+/// instantiating an exception-specification.
TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &Args,
SourceLocation Loc,
@@ -1579,9 +1605,17 @@ TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
QualType Result;
- if (FunctionProtoTypeLoc Proto = TL.getAs<FunctionProtoTypeLoc>()) {
- Result = Instantiator.TransformFunctionProtoType(TLB, Proto, ThisContext,
- ThisTypeQuals);
+ if (FunctionProtoTypeLoc Proto =
+ TL.IgnoreParens().getAs<FunctionProtoTypeLoc>()) {
+ // Instantiate the type, other than its exception specification. The
+ // exception specification is instantiated in InitFunctionInstantiation
+ // once we've built the FunctionDecl.
+ // FIXME: Set the exception specification to EST_Uninstantiated here,
+ // instead of rebuilding the function type again later.
+ Result = Instantiator.TransformFunctionProtoType(
+ TLB, Proto, ThisContext, ThisTypeQuals,
+ [](FunctionProtoType::ExceptionSpecInfo &ESI,
+ bool &Changed) { return false; });
} else {
Result = Instantiator.TransformType(TLB, TL);
}
@@ -1591,6 +1625,26 @@ TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
return TLB.getTypeSourceInfo(Context, Result);
}
+void Sema::SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
+ const MultiLevelTemplateArgumentList &Args) {
+ FunctionProtoType::ExceptionSpecInfo ESI =
+ Proto->getExtProtoInfo().ExceptionSpec;
+ assert(ESI.Type != EST_Uninstantiated);
+
+ TemplateInstantiator Instantiator(*this, Args, New->getLocation(),
+ New->getDeclName());
+
+ SmallVector<QualType, 4> ExceptionStorage;
+ bool Changed = false;
+ if (Instantiator.TransformExceptionSpec(
+ New->getTypeSourceInfo()->getTypeLoc().getLocEnd(), ESI,
+ ExceptionStorage, Changed))
+ // On error, recover by dropping the exception specification.
+ ESI.Type = EST_None;
+
+ UpdateExceptionSpec(New, ESI);
+}
+
ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
@@ -1947,8 +2001,6 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
TemplateDeclInstantiator Instantiator(*this, Instantiation, TemplateArgs);
SmallVector<Decl*, 4> Fields;
- SmallVector<std::pair<FieldDecl*, FieldDecl*>, 4>
- FieldsWithMemberInitializers;
// Delay instantiation of late parsed attributes.
LateInstantiatedAttrVec LateAttrs;
Instantiator.enableLateAttributeInstantiation(&LateAttrs);
@@ -1975,10 +2027,6 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
if (NewMember) {
if (FieldDecl *Field = dyn_cast<FieldDecl>(NewMember)) {
Fields.push_back(Field);
- FieldDecl *OldField = cast<FieldDecl>(Member);
- if (OldField->getInClassInitializer())
- FieldsWithMemberInitializers.push_back(std::make_pair(OldField,
- Field));
} else if (EnumDecl *Enum = dyn_cast<EnumDecl>(NewMember)) {
// C++11 [temp.inst]p1: The implicit instantiation of a class template
// specialization causes the implicit instantiation of the definitions
@@ -2015,31 +2063,6 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
SourceLocation(), SourceLocation(), nullptr);
CheckCompletedCXXClass(Instantiation);
- // Attach any in-class member initializers now the class is complete.
- // FIXME: We are supposed to defer instantiating these until they are needed.
- if (!FieldsWithMemberInitializers.empty()) {
- // C++11 [expr.prim.general]p4:
- // Otherwise, if a member-declarator declares a non-static data member
- // (9.2) of a class X, the expression this is a prvalue of type "pointer
- // to X" within the optional brace-or-equal-initializer. It shall not
- // appear elsewhere in the member-declarator.
- CXXThisScopeRAII ThisScope(*this, Instantiation, (unsigned)0);
-
- for (unsigned I = 0, N = FieldsWithMemberInitializers.size(); I != N; ++I) {
- FieldDecl *OldField = FieldsWithMemberInitializers[I].first;
- FieldDecl *NewField = FieldsWithMemberInitializers[I].second;
- Expr *OldInit = OldField->getInClassInitializer();
-
- ActOnStartCXXInClassMemberInitializer();
- ExprResult NewInit = SubstInitializer(OldInit, TemplateArgs,
- /*CXXDirectInit=*/false);
- Expr *Init = NewInit.get();
- assert((!Init || !isa<ParenListExpr>(Init)) &&
- "call-style init in class");
- ActOnFinishCXXInClassMemberInitializer(NewField,
- Init ? Init->getLocStart() : SourceLocation(), Init);
- }
- }
// Instantiate late parsed attributes, and attach them to their decls.
// See Sema::InstantiateAttrs
for (LateInstantiatedAttrVec::iterator I = LateAttrs.begin(),
@@ -2180,6 +2203,80 @@ bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
return Instantiation->isInvalidDecl();
}
+
+/// \brief Instantiate the definition of a field from the given pattern.
+///
+/// \param PointOfInstantiation The point of instantiation within the
+/// source code.
+/// \param Instantiation is the declaration whose definition is being
+/// instantiated. This will be a field of a class temploid
+/// specialization.
+/// \param Pattern The templated declaration from which the instantiation
+/// occurs.
+/// \param TemplateArgs The template arguments to be substituted into
+/// the pattern.
+///
+/// \return \c true if an error occurred, \c false otherwise.
+bool Sema::InstantiateInClassInitializer(
+ SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
+ FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs) {
+ // If there is no initializer, we don't need to do anything.
+ if (!Pattern->hasInClassInitializer())
+ return false;
+
+ assert(Instantiation->getInClassInitStyle() ==
+ Pattern->getInClassInitStyle() &&
+ "pattern and instantiation disagree about init style");
+
+ // Error out if we haven't parsed the initializer of the pattern yet because
+ // we are waiting for the closing brace of the outer class.
+ Expr *OldInit = Pattern->getInClassInitializer();
+ if (!OldInit) {
+ RecordDecl *PatternRD = Pattern->getParent();
+ RecordDecl *OutermostClass = PatternRD->getOuterLexicalRecordContext();
+ if (OutermostClass == PatternRD) {
+ Diag(Pattern->getLocEnd(), diag::err_in_class_initializer_not_yet_parsed)
+ << PatternRD << Pattern;
+ } else {
+ Diag(Pattern->getLocEnd(),
+ diag::err_in_class_initializer_not_yet_parsed_outer_class)
+ << PatternRD << OutermostClass << Pattern;
+ }
+ Instantiation->setInvalidDecl();
+ return true;
+ }
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
+ if (Inst.isInvalid())
+ return true;
+
+ // Enter the scope of this instantiation. We don't use PushDeclContext because
+ // we don't have a scope.
+ ContextRAII SavedContext(*this, Instantiation->getParent());
+ EnterExpressionEvaluationContext EvalContext(*this,
+ Sema::PotentiallyEvaluated);
+
+ LocalInstantiationScope Scope(*this);
+
+ // Instantiate the initializer.
+ ActOnStartCXXInClassMemberInitializer();
+ CXXThisScopeRAII ThisScope(*this, Instantiation->getParent(), /*TypeQuals=*/0);
+
+ ExprResult NewInit = SubstInitializer(OldInit, TemplateArgs,
+ /*CXXDirectInit=*/false);
+ Expr *Init = NewInit.get();
+ assert((!Init || !isa<ParenListExpr>(Init)) && "call-style init in class");
+ ActOnFinishCXXInClassMemberInitializer(
+ Instantiation, Init ? Init->getLocStart() : SourceLocation(), Init);
+
+ // Exit the scope of this instantiation.
+ SavedContext.pop();
+
+ // Return true if the in-class initializer is still missing.
+ return !Instantiation->getInClassInitializer();
+}
+
namespace {
/// \brief A partial specialization whose template arguments have matched
/// a given template-id.
@@ -2458,7 +2555,10 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
// Always skip the injected-class-name, along with any
// redeclarations of nested classes, since both would cause us
// to try to instantiate the members of a class twice.
- if (Record->isInjectedClassName() || Record->getPreviousDecl())
+ // Skip closure types; they'll get instantiated when we instantiate
+ // the corresponding lambda-expression.
+ if (Record->isInjectedClassName() || Record->getPreviousDecl() ||
+ Record->isLambda())
continue;
MemberSpecializationInfo *MSInfo = Record->getMemberSpecializationInfo();
@@ -2541,6 +2641,19 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
MSInfo->setTemplateSpecializationKind(TSK);
MSInfo->setPointOfInstantiation(PointOfInstantiation);
}
+ } else if (auto *Field = dyn_cast<FieldDecl>(D)) {
+ // No need to instantiate in-class initializers during explicit
+ // instantiation.
+ if (Field->hasInClassInitializer() && TSK == TSK_ImplicitInstantiation) {
+ CXXRecordDecl *ClassPattern =
+ Instantiation->getTemplateInstantiationPattern();
+ DeclContext::lookup_result Lookup =
+ ClassPattern->lookup(Field->getDeclName());
+ assert(Lookup.size() == 1);
+ FieldDecl *Pattern = cast<FieldDecl>(Lookup[0]);
+ InstantiateInClassInitializer(PointOfInstantiation, Field, Pattern,
+ TemplateArgs);
+ }
}
}
}
@@ -2649,8 +2762,7 @@ bool Sema::Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
return Instantiator.TransformTemplateArguments(Args, NumArgs, Result);
}
-
-static const Decl* getCanonicalParmVarDecl(const Decl *D) {
+static const Decl *getCanonicalParmVarDecl(const Decl *D) {
// When storing ParmVarDecls in the local instantiation scope, we always
// want to use the ParmVarDecl from the canonical function declaration,
// since the map is then valid for any redeclaration or definition of that
@@ -2658,7 +2770,10 @@ static const Decl* getCanonicalParmVarDecl(const Decl *D) {
if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(D)) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
unsigned i = PV->getFunctionScopeIndex();
- return FD->getCanonicalDecl()->getParamDecl(i);
+ // This parameter might be from a freestanding function type within the
+ // function and isn't necessarily referring to one of FD's parameters.
+ if (FD->getParamDecl(i) == PV)
+ return FD->getCanonicalDecl()->getParamDecl(i);
}
}
return D;
@@ -2707,12 +2822,22 @@ LocalInstantiationScope::findInstantiationOf(const Decl *D) {
void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) {
D = getCanonicalParmVarDecl(D);
llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
- if (Stored.isNull())
+ if (Stored.isNull()) {
+#ifndef NDEBUG
+ // It should not be present in any surrounding scope either.
+ LocalInstantiationScope *Current = this;
+ while (Current->CombineWithOuterScope && Current->Outer) {
+ Current = Current->Outer;
+ assert(Current->LocalDecls.find(D) == Current->LocalDecls.end() &&
+ "Instantiated local in inner and outer scopes");
+ }
+#endif
Stored = Inst;
- else if (DeclArgumentPack *Pack = Stored.dyn_cast<DeclArgumentPack *>())
+ } else if (DeclArgumentPack *Pack = Stored.dyn_cast<DeclArgumentPack *>()) {
Pack->push_back(Inst);
- else
+ } else {
assert(Stored.get<Decl *>() == Inst && "Already instantiated this local");
+ }
}
void LocalInstantiationScope::InstantiatedLocalPackArg(const Decl *D,
@@ -2723,9 +2848,16 @@ void LocalInstantiationScope::InstantiatedLocalPackArg(const Decl *D,
}
void LocalInstantiationScope::MakeInstantiatedLocalArgPack(const Decl *D) {
+#ifndef NDEBUG
+ // This should be the first time we've been told about this decl.
+ for (LocalInstantiationScope *Current = this;
+ Current && Current->CombineWithOuterScope; Current = Current->Outer)
+ assert(Current->LocalDecls.find(D) == Current->LocalDecls.end() &&
+ "Creating local pack after instantiation of local");
+#endif
+
D = getCanonicalParmVarDecl(D);
llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
- assert(Stored.isNull() && "Already instantiated this local");
DeclArgumentPack *Pack = new DeclArgumentPack;
Stored = Pack;
ArgumentPacks.push_back(Pack);
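For illustration only, not part of the patch: the deferral that InstantiateInClassInitializer and the FieldDecl case in InstantiateClassMembers above make possible. The name S is hypothetical; a minimal sketch:

template <typename T> struct S {
  T value = T(42);       // NSDMI; no longer substituted eagerly in InstantiateClass
};

int sz = sizeof(S<int>); // completes S<int>, leaving the initializer uninstantiated
S<int> s;                // the implicit default constructor instantiates 'T(42)'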
diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp
index accec95bf708..40e86175b2cd 100644
--- a/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -36,14 +36,24 @@ static bool isDeclWithinFunction(const Decl *D) {
return false;
}
-bool TemplateDeclInstantiator::SubstQualifier(const DeclaratorDecl *OldDecl,
- DeclaratorDecl *NewDecl) {
+template<typename DeclT>
+static bool SubstQualifier(Sema &SemaRef, const DeclT *OldDecl, DeclT *NewDecl,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
if (!OldDecl->getQualifierLoc())
return false;
+ assert((NewDecl->getFriendObjectKind() ||
+ !OldDecl->getLexicalDeclContext()->isDependentContext()) &&
+ "non-friend with qualified name defined in dependent context");
+ Sema::ContextRAII SavedContext(
+ SemaRef,
+ const_cast<DeclContext *>(NewDecl->getFriendObjectKind()
+ ? NewDecl->getLexicalDeclContext()
+ : OldDecl->getLexicalDeclContext()));
+
NestedNameSpecifierLoc NewQualifierLoc
- = SemaRef.SubstNestedNameSpecifierLoc(OldDecl->getQualifierLoc(),
- TemplateArgs);
+ = SemaRef.SubstNestedNameSpecifierLoc(OldDecl->getQualifierLoc(),
+ TemplateArgs);
if (!NewQualifierLoc)
return true;
@@ -52,20 +62,14 @@ bool TemplateDeclInstantiator::SubstQualifier(const DeclaratorDecl *OldDecl,
return false;
}
+bool TemplateDeclInstantiator::SubstQualifier(const DeclaratorDecl *OldDecl,
+ DeclaratorDecl *NewDecl) {
+ return ::SubstQualifier(SemaRef, OldDecl, NewDecl, TemplateArgs);
+}
+
bool TemplateDeclInstantiator::SubstQualifier(const TagDecl *OldDecl,
TagDecl *NewDecl) {
- if (!OldDecl->getQualifierLoc())
- return false;
-
- NestedNameSpecifierLoc NewQualifierLoc
- = SemaRef.SubstNestedNameSpecifierLoc(OldDecl->getQualifierLoc(),
- TemplateArgs);
-
- if (!NewQualifierLoc)
- return true;
-
- NewDecl->setQualifierInfo(NewQualifierLoc);
- return false;
+ return ::SubstQualifier(SemaRef, OldDecl, NewDecl, TemplateArgs);
}
// Include attribute instantiation code.
@@ -129,6 +133,40 @@ static void instantiateDependentAlignedAttr(
}
}
+static void instantiateDependentAssumeAlignedAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const AssumeAlignedAttr *Aligned, Decl *New) {
+ // The alignment expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::ConstantEvaluated);
+
+ Expr *E, *OE = nullptr;
+ ExprResult Result = S.SubstExpr(Aligned->getAlignment(), TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ E = Result.getAs<Expr>();
+
+ if (Aligned->getOffset()) {
+ Result = S.SubstExpr(Aligned->getOffset(), TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ OE = Result.getAs<Expr>();
+ }
+
+ S.AddAssumeAlignedAttr(Aligned->getLocation(), New, E, OE,
+ Aligned->getSpellingListIndex());
+}
+
+static void instantiateDependentAlignValueAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const AlignValueAttr *Aligned, Decl *New) {
+ // The alignment expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::ConstantEvaluated);
+ ExprResult Result = S.SubstExpr(Aligned->getAlignment(), TemplateArgs);
+ if (!Result.isInvalid())
+ S.AddAlignValueAttr(Aligned->getLocation(), New, Result.getAs<Expr>(),
+ Aligned->getSpellingListIndex());
+}
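For illustration only, not part of the patch: a dependent assume_aligned attribute of the kind instantiateDependentAssumeAlignedAttr above substitutes. The name make_block is hypothetical, and a real implementation would use an aligned allocator; a minimal sketch:

#include <cstddef>
#include <new>

template <std::size_t Align>
__attribute__((assume_aligned(Align))) void *make_block(std::size_t bytes) {
  return ::operator new(bytes);  // sketch only; callers may assume Align-byte alignment
}

void *p = make_block<64>(128);   // the attribute is instantiated with Align = 64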
+
static void instantiateDependentEnableIfAttr(
Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
const EnableIfAttr *A, const Decl *Tmpl, Decl *New) {
@@ -176,6 +214,18 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
continue;
}
+ const AssumeAlignedAttr *AssumeAligned = dyn_cast<AssumeAlignedAttr>(TmplAttr);
+ if (AssumeAligned) {
+ instantiateDependentAssumeAlignedAttr(*this, TemplateArgs, AssumeAligned, New);
+ continue;
+ }
+
+ const AlignValueAttr *AlignValue = dyn_cast<AlignValueAttr>(TmplAttr);
+ if (AlignValue) {
+ instantiateDependentAlignValueAttr(*this, TemplateArgs, AlignValue, New);
+ continue;
+ }
+
const EnableIfAttr *EnableIf = dyn_cast<EnableIfAttr>(TmplAttr);
if (EnableIf && EnableIf->getCond()->isValueDependent()) {
instantiateDependentEnableIfAttr(*this, TemplateArgs, EnableIf, Tmpl,
@@ -183,6 +233,14 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
continue;
}
+ // Existing DLL attribute on the instantiation takes precedence.
+ if (TmplAttr->getKind() == attr::DLLExport ||
+ TmplAttr->getKind() == attr::DLLImport) {
+ if (New->hasAttr<DLLExportAttr>() || New->hasAttr<DLLImportAttr>()) {
+ continue;
+ }
+ }
+
assert(!TmplAttr->isPackExpansion());
if (TmplAttr->isLateParsed() && LateAttrs) {
// Late parsed attributes must be instantiated and attached after the
@@ -207,6 +265,24 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
}
}
+/// Get the previous declaration of a declaration for the purposes of template
+/// instantiation. If this finds a previous declaration, then the previous
+/// declaration of the instantiation of D should be an instantiation of the
+/// result of this function.
+template<typename DeclT>
+static DeclT *getPreviousDeclForInstantiation(DeclT *D) {
+ DeclT *Result = D->getPreviousDecl();
+
+ // If the declaration is within a class, and the previous declaration was
+ // merged from a different definition of that class, then we don't have a
+ // previous declaration for the purpose of template instantiation.
+ if (Result && isa<CXXRecordDecl>(D->getDeclContext()) &&
+ D->getLexicalDeclContext() != Result->getLexicalDeclContext())
+ return nullptr;
+
+ return Result;
+}
+
Decl *
TemplateDeclInstantiator::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
llvm_unreachable("Translation units cannot be instantiated");
@@ -293,7 +369,7 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
}
}
- if (TypedefNameDecl *Prev = D->getPreviousDecl()) {
+ if (TypedefNameDecl *Prev = getPreviousDeclForInstantiation(D)) {
NamedDecl *InstPrev = SemaRef.FindInstantiatedDecl(D->getLocation(), Prev,
TemplateArgs);
if (!InstPrev)
@@ -316,13 +392,15 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
Decl *TemplateDeclInstantiator::VisitTypedefDecl(TypedefDecl *D) {
Decl *Typedef = InstantiateTypedefNameDecl(D, /*IsTypeAlias=*/false);
- Owner->addDecl(Typedef);
+ if (Typedef)
+ Owner->addDecl(Typedef);
return Typedef;
}
Decl *TemplateDeclInstantiator::VisitTypeAliasDecl(TypeAliasDecl *D) {
Decl *Typedef = InstantiateTypedefNameDecl(D, /*IsTypeAlias=*/true);
- Owner->addDecl(Typedef);
+ if (Typedef)
+ Owner->addDecl(Typedef);
return Typedef;
}
@@ -340,7 +418,7 @@ TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
TypeAliasDecl *Pattern = D->getTemplatedDecl();
TypeAliasTemplateDecl *PrevAliasTemplate = nullptr;
- if (Pattern->getPreviousDecl()) {
+ if (getPreviousDeclForInstantiation<TypedefNameDecl>(Pattern)) {
DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName());
if (!Found.empty()) {
PrevAliasTemplate = dyn_cast<TypeAliasTemplateDecl>(Found.front());
@@ -355,6 +433,7 @@ TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
TypeAliasTemplateDecl *Inst
= TypeAliasTemplateDecl::Create(SemaRef.Context, Owner, D->getLocation(),
D->getDeclName(), InstParams, AliasInst);
+ AliasInst->setDescribedAliasTemplate(Inst);
if (PrevAliasTemplate)
Inst->setPreviousDecl(PrevAliasTemplate);
@@ -578,11 +657,12 @@ Decl *TemplateDeclInstantiator::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
}
QualType T = cast<FieldDecl>(NamedChain[i-1])->getType();
- IndirectFieldDecl* IndirectField
- = IndirectFieldDecl::Create(SemaRef.Context, Owner, D->getLocation(),
- D->getIdentifier(), T,
- NamedChain, D->getChainingSize());
+ IndirectFieldDecl *IndirectField = IndirectFieldDecl::Create(
+ SemaRef.Context, Owner, D->getLocation(), D->getIdentifier(), T,
+ NamedChain, D->getChainingSize());
+ for (const auto *Attr : D->attrs())
+ IndirectField->addAttr(Attr->clone(SemaRef.Context));
IndirectField->setImplicit(D->isImplicit());
IndirectField->setAccess(D->getAccess());
@@ -659,9 +739,9 @@ Decl *TemplateDeclInstantiator::VisitStaticAssertDecl(StaticAssertDecl *D) {
Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
EnumDecl *PrevDecl = nullptr;
- if (D->getPreviousDecl()) {
+ if (EnumDecl *PatternPrev = getPreviousDeclForInstantiation(D)) {
NamedDecl *Prev = SemaRef.FindInstantiatedDecl(D->getLocation(),
- D->getPreviousDecl(),
+ PatternPrev,
TemplateArgs);
if (!Prev) return nullptr;
PrevDecl = cast<EnumDecl>(Prev);
@@ -823,7 +903,7 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
CXXRecordDecl *PrevDecl = nullptr;
ClassTemplateDecl *PrevClassTemplate = nullptr;
- if (!isFriend && Pattern->getPreviousDecl()) {
+ if (!isFriend && getPreviousDeclForInstantiation(Pattern)) {
DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName());
if (!Found.empty()) {
PrevClassTemplate = dyn_cast<ClassTemplateDecl>(Found.front());
@@ -1017,7 +1097,7 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateDecl(VarTemplateDecl *D) {
VarDecl *Pattern = D->getTemplatedDecl();
VarTemplateDecl *PrevVarTemplate = nullptr;
- if (Pattern->getPreviousDecl()) {
+ if (getPreviousDeclForInstantiation(Pattern)) {
DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName());
if (!Found.empty())
PrevVarTemplate = dyn_cast<VarTemplateDecl>(Found.front());
@@ -1127,7 +1207,7 @@ TemplateDeclInstantiator::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
if (!isFriend) {
Owner->addDecl(InstTemplate);
} else if (InstTemplate->getDeclContext()->isRecord() &&
- !D->getPreviousDecl()) {
+ !getPreviousDeclForInstantiation(D)) {
SemaRef.CheckFriendAccess(InstTemplate);
}
@@ -1138,9 +1218,9 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
CXXRecordDecl *PrevDecl = nullptr;
if (D->isInjectedClassName())
PrevDecl = cast<CXXRecordDecl>(Owner);
- else if (D->getPreviousDecl()) {
+ else if (CXXRecordDecl *PatternPrev = getPreviousDeclForInstantiation(D)) {
NamedDecl *Prev = SemaRef.FindInstantiatedDecl(D->getLocation(),
- D->getPreviousDecl(),
+ PatternPrev,
TemplateArgs);
if (!Prev) return nullptr;
PrevDecl = cast<CXXRecordDecl>(Prev);
@@ -1191,6 +1271,9 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
SemaRef.InstantiateClassMembers(D->getLocation(), Record, TemplateArgs,
TSK_ImplicitInstantiation);
}
+
+ SemaRef.DiagnoseUnusedNestedTypedefs(Record);
+
return Record;
}
@@ -2201,7 +2284,8 @@ Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
if (CheckRedeclaration) {
if (SemaRef.CheckUsingShadowDecl(NewUD, InstTarget, Prev, PrevDecl))
continue;
- } else if (UsingShadowDecl *OldPrev = Shadow->getPreviousDecl()) {
+ } else if (UsingShadowDecl *OldPrev =
+ getPreviousDeclForInstantiation(Shadow)) {
PrevDecl = cast_or_null<UsingShadowDecl>(SemaRef.FindInstantiatedDecl(
Shadow->getLocation(), OldPrev, TemplateArgs));
}
@@ -2276,8 +2360,10 @@ Decl * TemplateDeclInstantiator
Decl *TemplateDeclInstantiator::VisitClassScopeFunctionSpecializationDecl(
ClassScopeFunctionSpecializationDecl *Decl) {
CXXMethodDecl *OldFD = Decl->getSpecialization();
- CXXMethodDecl *NewFD = cast<CXXMethodDecl>(VisitCXXMethodDecl(OldFD,
- nullptr, true));
+ CXXMethodDecl *NewFD =
+ cast_or_null<CXXMethodDecl>(VisitCXXMethodDecl(OldFD, nullptr, true));
+ if (!NewFD)
+ return nullptr;
LookupResult Previous(SemaRef, NewFD->getNameInfo(), Sema::LookupOrdinaryName,
Sema::ForRedeclaration);
@@ -2976,7 +3062,7 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
/// Introduce the instantiated function parameters into the local
/// instantiation scope, and set the parameter names to those used
/// in the template.
-static void addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
+static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
const FunctionDecl *PatternDecl,
LocalInstantiationScope &Scope,
const MultiLevelTemplateArgumentList &TemplateArgs) {
@@ -2987,15 +3073,22 @@ static void addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
// Simple case: not a parameter pack.
assert(FParamIdx < Function->getNumParams());
ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
+ FunctionParam->setDeclName(PatternParam->getDeclName());
// If the parameter's type is not dependent, update it to match the type
// in the pattern. They can differ in top-level cv-qualifiers, and we want
// the pattern's type here. If the type is dependent, they can't differ,
- // per core issue 1668.
+ // per core issue 1668. Substitute into the type from the pattern, in case
+ // it's instantiation-dependent.
// FIXME: Updating the type to work around this is at best fragile.
- if (!PatternDecl->getType()->isDependentType())
- FunctionParam->setType(PatternParam->getType());
+ if (!PatternDecl->getType()->isDependentType()) {
+ QualType T = S.SubstType(PatternParam->getType(), TemplateArgs,
+ FunctionParam->getLocation(),
+ FunctionParam->getDeclName());
+ if (T.isNull())
+ return true;
+ FunctionParam->setType(T);
+ }
- FunctionParam->setDeclName(PatternParam->getDeclName());
Scope.InstantiatedLocal(PatternParam, FunctionParam);
++FParamIdx;
continue;
@@ -3007,137 +3100,27 @@ static void addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
= S.getNumArgumentsInExpansion(PatternParam->getType(), TemplateArgs);
assert(NumArgumentsInExpansion &&
"should only be called when all template arguments are known");
+ QualType PatternType =
+ PatternParam->getType()->castAs<PackExpansionType>()->getPattern();
for (unsigned Arg = 0; Arg < *NumArgumentsInExpansion; ++Arg) {
ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
- if (!PatternDecl->getType()->isDependentType())
- FunctionParam->setType(PatternParam->getType());
-
FunctionParam->setDeclName(PatternParam->getDeclName());
- Scope.InstantiatedLocalPackArg(PatternParam, FunctionParam);
- ++FParamIdx;
- }
- }
-}
-
-static void InstantiateExceptionSpec(Sema &SemaRef, FunctionDecl *New,
- const FunctionProtoType *Proto,
- const MultiLevelTemplateArgumentList &TemplateArgs) {
- assert(Proto->getExceptionSpecType() != EST_Uninstantiated);
-
- // C++11 [expr.prim.general]p3:
- // If a declaration declares a member function or member function
- // template of a class X, the expression this is a prvalue of type
- // "pointer to cv-qualifier-seq X" between the optional cv-qualifer-seq
- // and the end of the function-definition, member-declarator, or
- // declarator.
- CXXRecordDecl *ThisContext = nullptr;
- unsigned ThisTypeQuals = 0;
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(New)) {
- ThisContext = Method->getParent();
- ThisTypeQuals = Method->getTypeQualifiers();
- }
- Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, ThisTypeQuals,
- SemaRef.getLangOpts().CPlusPlus11);
-
- // The function has an exception specification or a "noreturn"
- // attribute. Substitute into each of the exception types.
- SmallVector<QualType, 4> Exceptions;
- for (unsigned I = 0, N = Proto->getNumExceptions(); I != N; ++I) {
- // FIXME: Poor location information!
- if (const PackExpansionType *PackExpansion
- = Proto->getExceptionType(I)->getAs<PackExpansionType>()) {
- // We have a pack expansion. Instantiate it.
- SmallVector<UnexpandedParameterPack, 2> Unexpanded;
- SemaRef.collectUnexpandedParameterPacks(PackExpansion->getPattern(),
- Unexpanded);
- assert(!Unexpanded.empty() &&
- "Pack expansion without parameter packs?");
-
- bool Expand = false;
- bool RetainExpansion = false;
- Optional<unsigned> NumExpansions = PackExpansion->getNumExpansions();
- if (SemaRef.CheckParameterPacksForExpansion(New->getLocation(),
- SourceRange(),
- Unexpanded,
- TemplateArgs,
- Expand,
- RetainExpansion,
- NumExpansions))
- break;
-
- if (!Expand) {
- // We can't expand this pack expansion into separate arguments yet;
- // just substitute into the pattern and create a new pack expansion
- // type.
- Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1);
- QualType T = SemaRef.SubstType(PackExpansion->getPattern(),
- TemplateArgs,
- New->getLocation(), New->getDeclName());
+ if (!PatternDecl->getType()->isDependentType()) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, Arg);
+ QualType T = S.SubstType(PatternType, TemplateArgs,
+ FunctionParam->getLocation(),
+ FunctionParam->getDeclName());
if (T.isNull())
- break;
-
- T = SemaRef.Context.getPackExpansionType(T, NumExpansions);
- Exceptions.push_back(T);
- continue;
- }
-
- // Substitute into the pack expansion pattern for each template
- bool Invalid = false;
- for (unsigned ArgIdx = 0; ArgIdx != *NumExpansions; ++ArgIdx) {
- Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, ArgIdx);
-
- QualType T = SemaRef.SubstType(PackExpansion->getPattern(),
- TemplateArgs,
- New->getLocation(), New->getDeclName());
- if (T.isNull()) {
- Invalid = true;
- break;
- }
-
- Exceptions.push_back(T);
+ return true;
+ FunctionParam->setType(T);
}
- if (Invalid)
- break;
-
- continue;
- }
-
- QualType T
- = SemaRef.SubstType(Proto->getExceptionType(I), TemplateArgs,
- New->getLocation(), New->getDeclName());
- if (T.isNull() ||
- SemaRef.CheckSpecifiedExceptionType(T, New->getLocation()))
- continue;
-
- Exceptions.push_back(T);
- }
- Expr *NoexceptExpr = nullptr;
- if (Expr *OldNoexceptExpr = Proto->getNoexceptExpr()) {
- EnterExpressionEvaluationContext Unevaluated(SemaRef,
- Sema::ConstantEvaluated);
- ExprResult E = SemaRef.SubstExpr(OldNoexceptExpr, TemplateArgs);
- if (E.isUsable())
- E = SemaRef.CheckBooleanCondition(E.get(), E.get()->getLocStart());
-
- if (E.isUsable()) {
- NoexceptExpr = E.get();
- if (!NoexceptExpr->isTypeDependent() &&
- !NoexceptExpr->isValueDependent())
- NoexceptExpr
- = SemaRef.VerifyIntegerConstantExpression(NoexceptExpr,
- nullptr, diag::err_noexcept_needs_constant_expression,
- /*AllowFold*/ false).get();
+ Scope.InstantiatedLocalPackArg(PatternParam, FunctionParam);
+ ++FParamIdx;
}
}
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.ExceptionSpecType = Proto->getExceptionSpecType();
- EPI.NumExceptions = Exceptions.size();
- EPI.Exceptions = Exceptions.data();
- EPI.NoexceptExpr = NoexceptExpr;
-
- SemaRef.UpdateExceptionSpec(New, EPI);
+ return false;
}
void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
@@ -3151,9 +3134,7 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
if (Inst.isInvalid()) {
// We hit the instantiation depth limit. Clear the exception specification
// so that our callers don't have to cope with EST_Uninstantiated.
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.ExceptionSpecType = EST_None;
- UpdateExceptionSpec(Decl, EPI);
+ UpdateExceptionSpec(Decl, EST_None);
return;
}
@@ -3166,11 +3147,14 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
getTemplateInstantiationArgs(Decl, nullptr, /*RelativeToPrimary*/true);
FunctionDecl *Template = Proto->getExceptionSpecTemplate();
- addInstantiatedParametersToScope(*this, Decl, Template, Scope, TemplateArgs);
+ if (addInstantiatedParametersToScope(*this, Decl, Template, Scope,
+ TemplateArgs)) {
+ UpdateExceptionSpec(Decl, EST_None);
+ return;
+ }
- ::InstantiateExceptionSpec(*this, Decl,
- Template->getType()->castAs<FunctionProtoType>(),
- TemplateArgs);
+ SubstExceptionSpec(Decl, Template->getType()->castAs<FunctionProtoType>(),
+ TemplateArgs);
}
/// \brief Initializes the common fields of an instantiation function
@@ -3218,14 +3202,14 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
// DR1330: In C++11, defer instantiation of a non-trivial
// exception specification.
if (SemaRef.getLangOpts().CPlusPlus11 &&
- EPI.ExceptionSpecType != EST_None &&
- EPI.ExceptionSpecType != EST_DynamicNone &&
- EPI.ExceptionSpecType != EST_BasicNoexcept) {
+ EPI.ExceptionSpec.Type != EST_None &&
+ EPI.ExceptionSpec.Type != EST_DynamicNone &&
+ EPI.ExceptionSpec.Type != EST_BasicNoexcept) {
FunctionDecl *ExceptionSpecTemplate = Tmpl;
- if (EPI.ExceptionSpecType == EST_Uninstantiated)
- ExceptionSpecTemplate = EPI.ExceptionSpecTemplate;
+ if (EPI.ExceptionSpec.Type == EST_Uninstantiated)
+ ExceptionSpecTemplate = EPI.ExceptionSpec.SourceTemplate;
ExceptionSpecificationType NewEST = EST_Uninstantiated;
- if (EPI.ExceptionSpecType == EST_Unevaluated)
+ if (EPI.ExceptionSpec.Type == EST_Unevaluated)
NewEST = EST_Unevaluated;
// Mark the function as having an uninstantiated exception specification.
@@ -3233,13 +3217,13 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
= New->getType()->getAs<FunctionProtoType>();
assert(NewProto && "Template instantiation without function prototype?");
EPI = NewProto->getExtProtoInfo();
- EPI.ExceptionSpecType = NewEST;
- EPI.ExceptionSpecDecl = New;
- EPI.ExceptionSpecTemplate = ExceptionSpecTemplate;
+ EPI.ExceptionSpec.Type = NewEST;
+ EPI.ExceptionSpec.SourceDecl = New;
+ EPI.ExceptionSpec.SourceTemplate = ExceptionSpecTemplate;
New->setType(SemaRef.Context.getFunctionType(
NewProto->getReturnType(), NewProto->getParamTypes(), EPI));
} else {
- ::InstantiateExceptionSpec(SemaRef, New, Proto, TemplateArgs);
+ SemaRef.SubstExceptionSpec(New, Proto, TemplateArgs);
}
}
@@ -3322,6 +3306,20 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
return;
}
+ // If we're performing recursive template instantiation, create our own
+ // queue of pending implicit instantiations that we will instantiate later,
+ // while we're still within our own instantiation context.
+ // This has to happen before LateTemplateParser below is called, so that
+ // it marks vtables used in late parsed templates as used.
+ SavePendingLocalImplicitInstantiationsRAII
+ SavedPendingLocalImplicitInstantiations(*this);
+ std::unique_ptr<SavePendingInstantiationsAndVTableUsesRAII>
+ SavePendingInstantiationsAndVTableUses;
+ if (Recursive) {
+ SavePendingInstantiationsAndVTableUses.reset(
+ new SavePendingInstantiationsAndVTableUsesRAII(*this));
+ }
+
// Call the LateTemplateParser callback if there is a need to late parse
// a templated function definition.
if (!Pattern && PatternDecl->isLateTemplateParsed() &&
@@ -3353,6 +3351,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
Function->setInvalidDecl();
} else if (Function->getTemplateSpecializationKind()
== TSK_ExplicitInstantiationDefinition) {
+ assert(!Recursive);
PendingInstantiations.push_back(
std::make_pair(Function, PointOfInstantiation));
}
@@ -3389,18 +3388,6 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// Copy the inner loc start from the pattern.
Function->setInnerLocStart(PatternDecl->getInnerLocStart());
- // If we're performing recursive template instantiation, create our own
- // queue of pending implicit instantiations that we will instantiate later,
- // while we're still within our own instantiation context.
- SmallVector<VTableUse, 16> SavedVTableUses;
- std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
- SavePendingLocalImplicitInstantiationsRAII
- SavedPendingLocalImplicitInstantiations(*this);
- if (Recursive) {
- VTableUses.swap(SavedVTableUses);
- PendingInstantiations.swap(SavedPendingInstantiations);
- }
-
EnterExpressionEvaluationContext EvalContext(*this,
Sema::PotentiallyEvaluated);
@@ -3417,17 +3404,24 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
if (PatternDecl->isDefaulted())
SetDeclDefaulted(Function, PatternDecl->getLocation());
else {
+ MultiLevelTemplateArgumentList TemplateArgs =
+ getTemplateInstantiationArgs(Function, nullptr, false, PatternDecl);
+
+ // Substitute into the qualifier; we can get a substitution failure here
+ // through evil use of alias templates.
+ // FIXME: Is CurContext correct for this? Should we go to the (instantiation
+ // of the) lexical context of the pattern?
+ SubstQualifier(*this, PatternDecl, Function, TemplateArgs);
+
ActOnStartOfFunctionDef(nullptr, Function);
// Enter the scope of this instantiation. We don't use
// PushDeclContext because we don't have a scope.
Sema::ContextRAII savedContext(*this, Function);
- MultiLevelTemplateArgumentList TemplateArgs =
- getTemplateInstantiationArgs(Function, nullptr, false, PatternDecl);
-
- addInstantiatedParametersToScope(*this, Function, PatternDecl, Scope,
- TemplateArgs);
+ if (addInstantiatedParametersToScope(*this, Function, PatternDecl, Scope,
+ TemplateArgs))
+ return;
// If this is a constructor, instantiate the member initializers.
if (const CXXConstructorDecl *Ctor =
@@ -3469,15 +3463,8 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// instantiation of this template.
PerformPendingInstantiations();
- // Restore the set of pending vtables.
- assert(VTableUses.empty() &&
- "VTableUses should be empty before it is discarded.");
- VTableUses.swap(SavedVTableUses);
-
- // Restore the set of pending implicit instantiations.
- assert(PendingInstantiations.empty() &&
- "PendingInstantiations should be empty before it is discarded.");
- PendingInstantiations.swap(SavedPendingInstantiations);
+ // Restore PendingInstantiations and VTableUses.
+ SavePendingInstantiationsAndVTableUses.reset();
}
}
@@ -3651,7 +3638,7 @@ void Sema::BuildVariableInstantiation(
// Diagnose unused local variables with dependent types, where the diagnostic
// will have been deferred.
if (!NewVar->isInvalidDecl() &&
- NewVar->getDeclContext()->isFunctionOrMethod() && !NewVar->isUsed() &&
+ NewVar->getDeclContext()->isFunctionOrMethod() &&
OldVar->getType()->isDependentType())
DiagnoseUnusedDecl(NewVar);
}
@@ -3793,11 +3780,11 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
// If we're performing recursive template instantiation, create our own
// queue of pending implicit instantiations that we will instantiate
// later, while we're still within our own instantiation context.
- SmallVector<VTableUse, 16> SavedVTableUses;
- std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
+ std::unique_ptr<SavePendingInstantiationsAndVTableUsesRAII>
+ SavePendingInstantiationsAndVTableUses;
if (Recursive) {
- VTableUses.swap(SavedVTableUses);
- PendingInstantiations.swap(SavedPendingInstantiations);
+ SavePendingInstantiationsAndVTableUses.reset(
+ new SavePendingInstantiationsAndVTableUsesRAII(*this));
}
LocalInstantiationScope Local(*this);
@@ -3825,15 +3812,8 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
// instantiation of this template.
PerformPendingInstantiations();
- // Restore the set of pending vtables.
- assert(VTableUses.empty() &&
- "VTableUses should be empty before it is discarded.");
- VTableUses.swap(SavedVTableUses);
-
- // Restore the set of pending implicit instantiations.
- assert(PendingInstantiations.empty() &&
- "PendingInstantiations should be empty before it is discarded.");
- PendingInstantiations.swap(SavedPendingInstantiations);
+ // Restore PendingInstantiations and VTableUses.
+ SavePendingInstantiationsAndVTableUses.reset();
}
}
@@ -3917,13 +3897,13 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
// If we're performing recursive template instantiation, create our own
// queue of pending implicit instantiations that we will instantiate later,
// while we're still within our own instantiation context.
- SmallVector<VTableUse, 16> SavedVTableUses;
- std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
SavePendingLocalImplicitInstantiationsRAII
SavedPendingLocalImplicitInstantiations(*this);
+ std::unique_ptr<SavePendingInstantiationsAndVTableUsesRAII>
+ SavePendingInstantiationsAndVTableUses;
if (Recursive) {
- VTableUses.swap(SavedVTableUses);
- PendingInstantiations.swap(SavedPendingInstantiations);
+ SavePendingInstantiationsAndVTableUses.reset(
+ new SavePendingInstantiationsAndVTableUsesRAII(*this));
}
// Enter the scope of this instantiation. We don't use
@@ -3990,15 +3970,8 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
// instantiation of this template.
PerformPendingInstantiations();
- // Restore the set of pending vtables.
- assert(VTableUses.empty() &&
- "VTableUses should be empty before it is discarded.");
- VTableUses.swap(SavedVTableUses);
-
- // Restore the set of pending implicit instantiations.
- assert(PendingInstantiations.empty() &&
- "PendingInstantiations should be empty before it is discarded.");
- PendingInstantiations.swap(SavedPendingInstantiations);
+ // Restore PendingInstantiations and VTableUses.
+ SavePendingInstantiationsAndVTableUses.reset();
}
}
@@ -4235,25 +4208,26 @@ static bool isInstantiationOf(EnumDecl *Pattern,
static bool isInstantiationOf(UsingShadowDecl *Pattern,
UsingShadowDecl *Instance,
ASTContext &C) {
- return C.getInstantiatedFromUsingShadowDecl(Instance) == Pattern;
+ return declaresSameEntity(C.getInstantiatedFromUsingShadowDecl(Instance),
+ Pattern);
}
static bool isInstantiationOf(UsingDecl *Pattern,
UsingDecl *Instance,
ASTContext &C) {
- return C.getInstantiatedFromUsingDecl(Instance) == Pattern;
+ return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern);
}
static bool isInstantiationOf(UnresolvedUsingValueDecl *Pattern,
UsingDecl *Instance,
ASTContext &C) {
- return C.getInstantiatedFromUsingDecl(Instance) == Pattern;
+ return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern);
}
static bool isInstantiationOf(UnresolvedUsingTypenameDecl *Pattern,
UsingDecl *Instance,
ASTContext &C) {
- return C.getInstantiatedFromUsingDecl(Instance) == Pattern;
+ return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern);
}
static bool isInstantiationOfStaticDataMember(VarDecl *Pattern,
@@ -4319,8 +4293,8 @@ static bool isInstantiationOf(ASTContext &Ctx, NamedDecl *D, Decl *Other) {
if (FieldDecl *Field = dyn_cast<FieldDecl>(Other)) {
if (!Field->getDeclName()) {
// This is an unnamed field.
- return Ctx.getInstantiatedFromUnnamedFieldDecl(Field) ==
- cast<FieldDecl>(D);
+ return declaresSameEntity(Ctx.getInstantiatedFromUnnamedFieldDecl(Field),
+ cast<FieldDecl>(D));
}
}
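These hunks replace raw pointer comparisons with declaresSameEntity, which compares canonical declarations so that any redeclaration of the same entity matches. A toy analogue of the comparison being performed (not clang's real Decl class):

    // Sketch only: 'same entity' means 'same canonical declaration'.
    struct Decl {
      Decl *Canonical = nullptr;        // null for the first declaration
      Decl *getCanonicalDecl() { return Canonical ? Canonical : this; }
    };

    static bool declaresSameEntity(Decl *D1, Decl *D2) {
      if (!D1 || !D2)
        return D1 == D2;                // null only matches null
      return D1->getCanonicalDecl() == D2->getCanonicalDecl();
    }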
@@ -4412,17 +4386,17 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
(isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda())) {
// D is a local of some kind. Look into the map of local
// declarations to their instantiations.
- typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
- llvm::PointerUnion<Decl *, DeclArgumentPack *> *Found
- = CurrentInstantiationScope->findInstantiationOf(D);
-
- if (Found) {
- if (Decl *FD = Found->dyn_cast<Decl *>())
- return cast<NamedDecl>(FD);
-
- int PackIdx = ArgumentPackSubstitutionIndex;
- assert(PackIdx != -1 && "found declaration pack but not pack expanding");
- return cast<NamedDecl>((*Found->get<DeclArgumentPack *>())[PackIdx]);
+ if (CurrentInstantiationScope) {
+ if (auto Found = CurrentInstantiationScope->findInstantiationOf(D)) {
+ if (Decl *FD = Found->dyn_cast<Decl *>())
+ return cast<NamedDecl>(FD);
+
+ int PackIdx = ArgumentPackSubstitutionIndex;
+ assert(PackIdx != -1 &&
+ "found declaration pack but not pack expanding");
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+ return cast<NamedDecl>((*Found->get<DeclArgumentPack *>())[PackIdx]);
+ }
}
// If we're performing a partial substitution during template argument
diff --git a/lib/Sema/SemaTemplateVariadic.cpp b/lib/Sema/SemaTemplateVariadic.cpp
index 8e4ce0d9da6f..e4fab71d995b 100644
--- a/lib/Sema/SemaTemplateVariadic.cpp
+++ b/lib/Sema/SemaTemplateVariadic.cpp
@@ -197,6 +197,20 @@ namespace {
};
}
+/// \brief Determine whether it's possible for an unexpanded parameter pack to
+/// be valid in this location. This only happens when we're in a declaration
+/// that is nested within an expression that could be expanded, such as a
+/// lambda-expression within a function call.
+///
+/// This is conservatively correct, but may claim that some unexpanded packs are
+/// permitted when they are not.
+bool Sema::isUnexpandedParameterPackPermitted() {
+ for (auto *SI : FunctionScopes)
+ if (isa<sema::LambdaScopeInfo>(SI))
+ return true;
+ return false;
+}
+
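An example of the situation this predicate permits, for illustration only: a declaration that mentions an unexpanded pack inside a lambda, where the lambda itself sits in a pack expansion of the enclosing expression.

    template <typename... Ts> int use(Ts...) { return sizeof...(Ts); }

    template <typename... Ts> int f(Ts... ts) {
      // The parameter declaration 'decltype(ts) x' contains the unexpanded
      // pack 'ts', but the lambda is part of the pattern of the pack
      // expansion in the call to use(), so the pack is expanded there.
      return use([](decltype(ts) x) { return x; }(ts)...);
    }

    int k = f(1, 2, 3);   // k == 3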
/// \brief Diagnose all of the unexpanded parameter packs in the given
/// vector.
bool
@@ -230,7 +244,7 @@ Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
else
Name = Unexpanded[I].first.get<NamedDecl *>()->getIdentifier();
- if (Name && NamesKnown.insert(Name))
+ if (Name && NamesKnown.insert(Name).second)
Names.push_back(Name);
if (Unexpanded[I].second.isValid())
@@ -733,24 +747,48 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_error:
break;
}
-
+
for (unsigned I = 0, N = D.getNumTypeObjects(); I != N; ++I) {
const DeclaratorChunk &Chunk = D.getTypeObject(I);
switch (Chunk.Kind) {
case DeclaratorChunk::Pointer:
case DeclaratorChunk::Reference:
case DeclaratorChunk::Paren:
+ case DeclaratorChunk::BlockPointer:
// These declarator chunks cannot contain any parameter packs.
break;
case DeclaratorChunk::Array:
+ if (Chunk.Arr.NumElts &&
+ Chunk.Arr.NumElts->containsUnexpandedParameterPack())
+ return true;
+ break;
case DeclaratorChunk::Function:
- case DeclaratorChunk::BlockPointer:
- // Syntactically, these kinds of declarator chunks all come after the
- // declarator-id (conceptually), so the parser should not invoke this
- // routine at this time.
- llvm_unreachable("Could not have seen this kind of declarator chunk");
-
+ for (unsigned i = 0, e = Chunk.Fun.NumParams; i != e; ++i) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(Chunk.Fun.Params[i].Param);
+ QualType ParamTy = Param->getType();
+ assert(!ParamTy.isNull() && "Couldn't parse type?");
+ if (ParamTy->containsUnexpandedParameterPack()) return true;
+ }
+
+ if (Chunk.Fun.getExceptionSpecType() == EST_Dynamic) {
+ for (unsigned i = 0; i != Chunk.Fun.NumExceptions; ++i) {
+ if (Chunk.Fun.Exceptions[i]
+ .Ty.get()
+ ->containsUnexpandedParameterPack())
+ return true;
+ }
+ } else if (Chunk.Fun.getExceptionSpecType() == EST_ComputedNoexcept &&
+ Chunk.Fun.NoexceptExpr->containsUnexpandedParameterPack())
+ return true;
+
+ if (Chunk.Fun.hasTrailingReturnType()) {
+ QualType T = Chunk.Fun.getTrailingReturnType().get();
+ if (!T.isNull() && T->containsUnexpandedParameterPack())
+ return true;
+ }
+ break;
+
case DeclaratorChunk::MemberPointer:
if (Chunk.Mem.Scope().getScopeRep() &&
Chunk.Mem.Scope().getScopeRep()->containsUnexpandedParameterPack())
@@ -800,7 +838,6 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
LookupName(R, S);
NamedDecl *ParameterPack = nullptr;
- ParameterPackValidatorCCC Validator;
switch (R.getResultKind()) {
case LookupResult::Found:
ParameterPack = R.getFoundDecl();
@@ -808,9 +845,10 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
- if (TypoCorrection Corrected = CorrectTypo(R.getLookupNameInfo(),
- R.getLookupKind(), S, nullptr,
- Validator, CTK_ErrorRecovery)) {
+ if (TypoCorrection Corrected =
+ CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, nullptr,
+ llvm::make_unique<ParameterPackValidatorCCC>(),
+ CTK_ErrorRecovery)) {
diagnoseTypo(Corrected,
PDiag(diag::err_sizeof_pack_no_pack_name_suggest) << &Name,
PDiag(diag::note_parameter_pack_here));
@@ -897,3 +935,108 @@ Sema::getTemplateArgumentPackExpansionPattern(
llvm_unreachable("Invalid TemplateArgument Kind!");
}
+
+static void CheckFoldOperand(Sema &S, Expr *E) {
+ if (!E)
+ return;
+
+ E = E->IgnoreImpCasts();
+ if (isa<BinaryOperator>(E) || isa<AbstractConditionalOperator>(E)) {
+ S.Diag(E->getExprLoc(), diag::err_fold_expression_bad_operand)
+ << E->getSourceRange()
+ << FixItHint::CreateInsertion(E->getLocStart(), "(")
+ << FixItHint::CreateInsertion(E->getLocEnd(), ")");
+ }
+}
+
+ExprResult Sema::ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
+ tok::TokenKind Operator,
+ SourceLocation EllipsisLoc, Expr *RHS,
+ SourceLocation RParenLoc) {
+ // LHS and RHS must be cast-expressions. We allow an arbitrary expression
+ // in the parser and reduce down to just cast-expressions here.
+ CheckFoldOperand(*this, LHS);
+ CheckFoldOperand(*this, RHS);
+
+ // [expr.prim.fold]p3:
+ // In a binary fold, op1 and op2 shall be the same fold-operator, and
+ // either e1 shall contain an unexpanded parameter pack or e2 shall contain
+ // an unexpanded parameter pack, but not both.
+ if (LHS && RHS &&
+ LHS->containsUnexpandedParameterPack() ==
+ RHS->containsUnexpandedParameterPack()) {
+ return Diag(EllipsisLoc,
+ LHS->containsUnexpandedParameterPack()
+ ? diag::err_fold_expression_packs_both_sides
+ : diag::err_pack_expansion_without_parameter_packs)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ }
+
+ // [expr.prim.fold]p2:
+ // In a unary fold, the cast-expression shall contain an unexpanded
+ // parameter pack.
+ if (!LHS || !RHS) {
+ Expr *Pack = LHS ? LHS : RHS;
+ assert(Pack && "fold expression with neither LHS nor RHS");
+ if (!Pack->containsUnexpandedParameterPack())
+ return Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << Pack->getSourceRange();
+ }
+
+ BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Operator);
+ return BuildCXXFoldExpr(LParenLoc, LHS, Opc, EllipsisLoc, RHS, RParenLoc);
+}
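For illustration (not part of the patch), the two rules enforced above in source form: a fold must name a pack on exactly one side, and each operand must be a cast-expression, which is what the parenthesis fix-it from CheckFoldOperand addresses.

    template <typename... Ts> int sum(Ts... ts) {
      // return (ts + ... + ts);  // rejected: both operands contain the pack
      return (ts + ...);          // OK: unary right fold
    }

    template <typename... Ts> int sum_plus_one_each(Ts... ts) {
      // return (1 + ts + ...);   // rejected: '1 + ts' is not a
      //                          // cast-expression; the fix-it suggests
      //                          // parenthesizing it
      return ((1 + ts) + ...);    // OK once parenthesized
    }

    int a = sum(1, 2, 3);             // 6
    int b = sum_plus_one_each(1, 2);  // (1+1) + (1+2) == 5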
+
+ExprResult Sema::BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
+ BinaryOperatorKind Operator,
+ SourceLocation EllipsisLoc, Expr *RHS,
+ SourceLocation RParenLoc) {
+ return new (Context) CXXFoldExpr(Context.DependentTy, LParenLoc, LHS,
+ Operator, EllipsisLoc, RHS, RParenLoc);
+}
+
+ExprResult Sema::BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
+ BinaryOperatorKind Operator) {
+ // [temp.variadic]p9:
+ // If N is zero for a unary fold-expression, the value of the expression is
+ // * -> 1
+ // + -> int()
+ // & -> -1
+ // | -> int()
+ // && -> true
+ // || -> false
+ // , -> void()
+ // if the operator is not listed [above], the instantiation is ill-formed.
+ //
+ // Note that we need to use something like int() here, not merely 0, to
+ // prevent the result from being a null pointer constant.
+ QualType ScalarType;
+ switch (Operator) {
+ case BO_Add:
+ ScalarType = Context.IntTy;
+ break;
+ case BO_Mul:
+ return ActOnIntegerConstant(EllipsisLoc, 1);
+ case BO_Or:
+ ScalarType = Context.IntTy;
+ break;
+ case BO_And:
+ return CreateBuiltinUnaryOp(EllipsisLoc, UO_Minus,
+ ActOnIntegerConstant(EllipsisLoc, 1).get());
+ case BO_LOr:
+ return ActOnCXXBoolLiteral(EllipsisLoc, tok::kw_false);
+ case BO_LAnd:
+ return ActOnCXXBoolLiteral(EllipsisLoc, tok::kw_true);
+ case BO_Comma:
+ ScalarType = Context.VoidTy;
+ break;
+
+ default:
+ return Diag(EllipsisLoc, diag::err_fold_expression_empty)
+ << BinaryOperator::getOpcodeStr(Operator);
+ }
+
+ return new (Context) CXXScalarValueInitExpr(
+ ScalarType, Context.getTrivialTypeSourceInfo(ScalarType, EllipsisLoc),
+ EllipsisLoc);
+}
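A short illustration of those empty-pack fallbacks (not part of the patch). This reflects the draft rules implemented here; the final C++17 wording kept the fallbacks only for &&, || and the comma operator.

    template <typename... Ts> constexpr bool all(Ts... ts) { return (ts && ...); }
    template <typename... Ts> constexpr bool any(Ts... ts) { return (ts || ...); }

    static_assert(all() == true,  "empty pack: '&&' folds to true");
    static_assert(any() == false, "empty pack: '||' folds to false");
    static_assert(all(true, true, false) == false, "ordinary non-empty fold");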
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index be1191c1e8e0..0f96a1cbce35 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -107,6 +107,7 @@ static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr,
case AttributeList::AT_StdCall: \
case AttributeList::AT_ThisCall: \
case AttributeList::AT_Pascal: \
+ case AttributeList::AT_VectorCall: \
case AttributeList::AT_MSABI: \
case AttributeList::AT_SysVABI: \
case AttributeList::AT_Regparm: \
@@ -660,26 +661,27 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
// ...and *prepend* it to the declarator.
SourceLocation NoLoc;
declarator.AddInnermostTypeInfo(DeclaratorChunk::getFunction(
- /*HasProto=*/true,
- /*IsAmbiguous=*/false,
- /*LParenLoc=*/NoLoc,
- /*ArgInfo=*/nullptr,
- /*NumArgs=*/0,
- /*EllipsisLoc=*/NoLoc,
- /*RParenLoc=*/NoLoc,
- /*TypeQuals=*/0,
- /*RefQualifierIsLvalueRef=*/true,
- /*RefQualifierLoc=*/NoLoc,
- /*ConstQualifierLoc=*/NoLoc,
- /*VolatileQualifierLoc=*/NoLoc,
- /*MutableLoc=*/NoLoc,
- EST_None,
- /*ESpecLoc=*/NoLoc,
- /*Exceptions=*/nullptr,
- /*ExceptionRanges=*/nullptr,
- /*NumExceptions=*/0,
- /*NoexceptExpr=*/nullptr,
- loc, loc, declarator));
+ /*HasProto=*/true,
+ /*IsAmbiguous=*/false,
+ /*LParenLoc=*/NoLoc,
+ /*ArgInfo=*/nullptr,
+ /*NumArgs=*/0,
+ /*EllipsisLoc=*/NoLoc,
+ /*RParenLoc=*/NoLoc,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLvalueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc,
+ /*MutableLoc=*/NoLoc, EST_None,
+ /*ESpecLoc=*/NoLoc,
+ /*Exceptions=*/nullptr,
+ /*ExceptionRanges=*/nullptr,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/nullptr,
+ /*ExceptionSpecTokens=*/nullptr,
+ loc, loc, declarator));
// For consistency, make sure the state still has us as processing
// the decl spec.
@@ -763,7 +765,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// is inferred from the return statements inside the block.
// The declspec is always missing in a lambda expr context; it is either
// specified with a trailing return type or inferred.
- if (S.getLangOpts().CPlusPlus1y &&
+ if (S.getLangOpts().CPlusPlus14 &&
declarator.getContext() == Declarator::LambdaExprContext) {
// In C++1y, a lambda's implicit return type is 'auto'.
Result = Context.getAutoDeductType();
@@ -1005,16 +1007,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
const unsigned TemplateParameterDepth = LSI->AutoTemplateParameterDepth;
const unsigned AutoParameterPosition = LSI->AutoTemplateParams.size();
const bool IsParameterPack = declarator.hasEllipsis();
-
- // Create a name for the invented template parameter type.
- std::string InventedTemplateParamName = "$auto-";
- llvm::raw_string_ostream ss(InventedTemplateParamName);
- ss << TemplateParameterDepth;
- ss << "-" << AutoParameterPosition;
- ss.flush();
-
- IdentifierInfo& TemplateParamII = Context.Idents.get(
- InventedTemplateParamName.c_str());
+
// Turns out we must create the TemplateTypeParmDecl here to
// retrieve the corresponding template parameter type.
TemplateTypeParmDecl *CorrespondingTemplateParam =
@@ -1029,11 +1022,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
/*NameLoc*/ declarator.getLocStart(),
TemplateParameterDepth,
AutoParameterPosition, // our template param index
- /* Identifier*/ &TemplateParamII, false, IsParameterPack);
+ /* Identifier*/ nullptr, false, IsParameterPack);
LSI->AutoTemplateParams.push_back(CorrespondingTemplateParam);
// Replace the 'auto' in the function parameter with this invented
// template type parameter.
- Result = QualType(CorrespondingTemplateParam->getTypeForDecl(), 0);
+ Result = QualType(CorrespondingTemplateParam->getTypeForDecl(), 0);
} else {
Result = Context.getAutoType(QualType(), /*decltype(auto)*/false, false);
}
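What this code implements, shown from the user's side (illustrative): every 'auto' in a generic lambda's parameter list becomes an invented template type parameter of the call operator. The hunk merely stops giving that invented parameter a synthesized "$auto-<depth>-<index>" name and leaves it unnamed.

    // A C++14 generic lambda...
    auto add = [](auto a, auto b) { return a + b; };

    // ...behaves roughly like a closure whose call operator is a template
    // with one invented type parameter per 'auto' (sketch, not the actual
    // closure type the compiler builds):
    struct AddClosure {
      template <typename T0, typename T1>
      auto operator()(T0 a, T1 b) const { return a + b; }
    };

    int  x = add(1, 2);     // T0 = T1 = int
    auto y = add(1.5, 2);   // T0 = double, T1 = int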
@@ -1107,8 +1100,9 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
S.Diag(DS.getConstSpecLoc(), diag::warn_typecheck_function_qualifiers)
<< Result << DS.getSourceRange();
else if (TypeQuals & DeclSpec::TQ_volatile)
- S.Diag(DS.getVolatileSpecLoc(), diag::warn_typecheck_function_qualifiers)
- << Result << DS.getSourceRange();
+ S.Diag(DS.getVolatileSpecLoc(),
+ diag::warn_typecheck_function_qualifiers)
+ << Result << DS.getSourceRange();
else {
assert((TypeQuals & (DeclSpec::TQ_restrict | DeclSpec::TQ_atomic)) &&
"Has CVRA quals but not C, V, R, or A?");
@@ -1174,6 +1168,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Qualified;
}
+ assert(!Result.isNull() && "This function should not return a null type");
return Result;
}
@@ -1186,6 +1181,9 @@ static std::string getPrintableNameForEntity(DeclarationName Entity) {
QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
Qualifiers Qs, const DeclSpec *DS) {
+ if (T.isNull())
+ return QualType();
+
// Enforce C99 6.7.3p2: "Types other than pointer types derived from
// object or incomplete types shall not be restrict-qualified."
if (Qs.hasRestrict()) {
@@ -1224,6 +1222,9 @@ QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
unsigned CVRA, const DeclSpec *DS) {
+ if (T.isNull())
+ return QualType();
+
// Convert from DeclSpec::TQ to Qualifiers::TQ by just dropping TQ_atomic.
unsigned CVR = CVRA & ~DeclSpec::TQ_atomic;
@@ -1746,7 +1747,7 @@ bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) {
}
// Functions cannot return half FP.
- if (T->isHalfType()) {
+ if (T->isHalfType() && !getLangOpts().HalfArgsAndReturns) {
Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 1 <<
FixItHint::CreateInsertion(Loc, "*");
return true;
@@ -1776,7 +1777,7 @@ QualType Sema::BuildFunctionType(QualType T,
if (ParamType->isVoidType()) {
Diag(Loc, diag::err_param_with_void_type);
Invalid = true;
- } else if (ParamType->isHalfType()) {
+ } else if (ParamType->isHalfType() && !getLangOpts().HalfArgsAndReturns) {
// Disallow half FP arguments.
Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 0 <<
FixItHint::CreateInsertion(Loc, "*");
@@ -2175,7 +2176,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
Error = 0;
break;
case Declarator::LambdaExprParameterContext:
- if (!(SemaRef.getLangOpts().CPlusPlus1y
+ if (!(SemaRef.getLangOpts().CPlusPlus14
&& D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto))
Error = 14;
break;
@@ -2208,11 +2209,11 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
Error = 10; // Type alias
break;
case Declarator::TrailingReturnContext:
- if (!SemaRef.getLangOpts().CPlusPlus1y)
+ if (!SemaRef.getLangOpts().CPlusPlus14)
Error = 11; // Function return type
break;
case Declarator::ConversionIdContext:
- if (!SemaRef.getLangOpts().CPlusPlus1y)
+ if (!SemaRef.getLangOpts().CPlusPlus14)
Error = 12; // conversion-type-id
break;
case Declarator::TypeNameContext:
@@ -2332,6 +2333,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
}
}
+ assert(!T.isNull() && "This function should not return a null type");
return T;
}
@@ -2481,7 +2483,8 @@ getCCForDeclaratorChunk(Sema &S, Declarator &D,
static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
QualType declSpecType,
TypeSourceInfo *TInfo) {
-
+ // The TypeSourceInfo that this function returns will not be a null type.
+ // If there is an error, this function will fill in a dummy type as fallback.
QualType T = declSpecType;
Declarator &D = state.getDeclarator();
Sema &S = state.getSema();
@@ -2697,7 +2700,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// and not, for instance, a pointer to a function.
if (D.getDeclSpec().containsPlaceholderType() &&
!FTI.hasTrailingReturnType() && chunkIndex == 0 &&
- !S.getLangOpts().CPlusPlus1y) {
+ !S.getLangOpts().CPlusPlus14) {
S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto
? diag::err_auto_missing_trailing_return
@@ -2751,7 +2754,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
S.Diag(D.getIdentifierLoc(), diag::err_opencl_half_return) << T;
D.setInvalidType(true);
}
- } else {
+ } else if (!S.getLangOpts().HalfArgsAndReturns) {
S.Diag(D.getIdentifierLoc(),
diag::err_parameters_retval_cannot_have_fp16_type) << 1;
D.setInvalidType(true);
@@ -2941,7 +2944,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
D.setInvalidType();
Param->setInvalidDecl();
}
- } else {
+ } else if (!S.getLangOpts().HalfArgsAndReturns) {
S.Diag(Param->getLocation(),
diag::err_parameters_retval_cannot_have_fp16_type) << 0;
D.setInvalidType();
@@ -2989,12 +2992,13 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
NoexceptExpr = FTI.NoexceptExpr;
}
- S.checkExceptionSpecification(FTI.getExceptionSpecType(),
+ S.checkExceptionSpecification(D.isFunctionDeclarationContext(),
+ FTI.getExceptionSpecType(),
DynamicExceptions,
DynamicExceptionRanges,
NoexceptExpr,
Exceptions,
- EPI);
+ EPI.ExceptionSpec);
T = Context.getFunctionType(T, ParamTys, EPI);
}
@@ -3021,6 +3025,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
llvm_unreachable("Nested-name-specifier must name a type");
case NestedNameSpecifier::TypeSpec:
@@ -3044,7 +3049,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
if (!ClsType.isNull())
- T = S.BuildMemberPointerType(T, ClsType, DeclType.Loc, D.getIdentifier());
+ T = S.BuildMemberPointerType(T, ClsType, DeclType.Loc,
+ D.getIdentifier());
if (T.isNull()) {
T = Context.IntTy;
D.setInvalidType(true);
@@ -3064,6 +3070,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
processTypeAttrs(state, T, TAL_DeclChunk, attrs);
}
+ assert(!T.isNull() && "T must not be null after this point");
+
if (LangOpts.CPlusPlus && T->isFunctionType()) {
const FunctionProtoType *FnTy = T->getAs<FunctionProtoType>();
assert(FnTy && "Why oh why is there not a FunctionProtoType here?");
@@ -3120,9 +3128,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
RemovalLocs.push_back(Chunk.Fun.getConstQualifierLoc());
if (Chunk.Fun.TypeQuals & Qualifiers::Volatile)
RemovalLocs.push_back(Chunk.Fun.getVolatileQualifierLoc());
- // FIXME: We do not track the location of the __restrict qualifier.
- //if (Chunk.Fun.TypeQuals & Qualifiers::Restrict)
- // RemovalLocs.push_back(Chunk.Fun.getRestrictQualifierLoc());
+ if (Chunk.Fun.TypeQuals & Qualifiers::Restrict)
+ RemovalLocs.push_back(Chunk.Fun.getRestrictQualifierLoc());
if (!RemovalLocs.empty()) {
std::sort(RemovalLocs.begin(), RemovalLocs.end(),
BeforeThanCompare<SourceLocation>(S.getSourceManager()));
@@ -3153,12 +3160,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
// Apply any undistributed attributes from the declarator.
- if (!T.isNull())
- if (AttributeList *attrs = D.getAttributes())
- processTypeAttrs(state, T, TAL_DeclName, attrs);
+ if (AttributeList *attrs = D.getAttributes())
+ processTypeAttrs(state, T, TAL_DeclName, attrs);
// Diagnose any ignored type attributes.
- if (!T.isNull()) state.diagnoseIgnoredTypeAttrs(T);
+ state.diagnoseIgnoredTypeAttrs(T);
// C++0x [dcl.constexpr]p9:
// A constexpr specifier used in an object declaration declares the object
@@ -3169,7 +3175,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// If there was an ellipsis in the declarator, the declaration declares a
// parameter pack whose type may be a pack expansion type.
- if (D.hasEllipsis() && !T.isNull()) {
+ if (D.hasEllipsis()) {
// C++0x [dcl.fct]p13:
// A declarator-id or abstract-declarator containing an ellipsis shall
// only be used in a parameter-declaration. Such a parameter-declaration
@@ -3234,15 +3240,15 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case Declarator::TemplateTypeArgContext:
// FIXME: We may want to allow parameter packs in block-literal contexts
// in the future.
- S.Diag(D.getEllipsisLoc(), diag::err_ellipsis_in_declarator_not_parameter);
+ S.Diag(D.getEllipsisLoc(),
+ diag::err_ellipsis_in_declarator_not_parameter);
D.setEllipsisLoc(SourceLocation());
break;
}
}
- if (T.isNull())
- return Context.getNullTypeSourceInfo();
- else if (D.isInvalidType())
+ assert(!T.isNull() && "T must not be null at the end of this function");
+ if (D.isInvalidType())
return Context.getTrivialTypeSourceInfo(T);
return S.GetTypeSourceInfoForDeclarator(D, T, TInfo);
@@ -3261,8 +3267,6 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S) {
TypeSourceInfo *ReturnTypeInfo = nullptr;
QualType T = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
- if (T.isNull())
- return Context.getNullTypeSourceInfo();
if (D.isPrototypeContext() && getLangOpts().ObjCAutoRefCount)
inferARCWriteback(state, T);
@@ -3376,8 +3380,6 @@ TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
TypeSourceInfo *ReturnTypeInfo = nullptr;
QualType declSpecTy = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
- if (declSpecTy.isNull())
- return Context.getNullTypeSourceInfo();
if (getLangOpts().ObjCAutoRefCount) {
Qualifiers::ObjCLifetime ownership = Context.getInnerObjCOwnership(FromTy);
@@ -3417,6 +3419,8 @@ static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) {
return AttributeList::AT_ThisCall;
case AttributedType::attr_pascal:
return AttributeList::AT_Pascal;
+ case AttributedType::attr_vectorcall:
+ return AttributeList::AT_VectorCall;
case AttributedType::attr_pcs:
case AttributedType::attr_pcs_vfp:
return AttributeList::AT_Pcs;
@@ -3717,6 +3721,7 @@ namespace {
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
llvm_unreachable("Nested-name-specifier must name a type");
}
@@ -3975,6 +3980,8 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
ASIdx = LangAS::opencl_local; break;
case AttributeList::AT_OpenCLConstantAddressSpace:
ASIdx = LangAS::opencl_constant; break;
+ case AttributeList::AT_OpenCLGenericAddressSpace:
+ ASIdx = LangAS::opencl_generic; break;
default:
assert(Attr.getKind() == AttributeList::AT_OpenCLPrivateAddressSpace);
ASIdx = 0; break;
@@ -4432,6 +4439,8 @@ static AttributedType::Kind getCCTypeAttrKind(AttributeList &Attr) {
return AttributedType::attr_thiscall;
case AttributeList::AT_Pascal:
return AttributedType::attr_pascal;
+ case AttributeList::AT_VectorCall:
+ return AttributedType::attr_vectorcall;
case AttributeList::AT_Pcs: {
// The attribute may have had a fixit applied where we treated an
// identifier as a string literal. The contents of the string are valid,
@@ -4549,7 +4558,7 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
}
// Diagnose use of callee-cleanup calling convention on variadic functions.
- if (isCalleeCleanup(CC)) {
+ if (!supportsVariadicCall(CC)) {
const FunctionProtoType *FnP = dyn_cast<FunctionProtoType>(fn);
if (FnP && FnP->isVariadic()) {
unsigned DiagID = diag::err_cconv_varargs;
@@ -4564,23 +4573,12 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
}
}
- // Diagnose the use of X86 fastcall on unprototyped functions.
- if (CC == CC_X86FastCall) {
- if (isa<FunctionNoProtoType>(fn)) {
- S.Diag(attr.getLoc(), diag::err_cconv_knr)
- << FunctionType::getNameForCallConv(CC);
- attr.setInvalid();
- return true;
- }
-
- // Also diagnose fastcall with regparm.
- if (fn->getHasRegParm()) {
- S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
- << "regparm"
- << FunctionType::getNameForCallConv(CC);
- attr.setInvalid();
- return true;
- }
+ // Also diagnose fastcall with regparm.
+ if (CC == CC_X86FastCall && fn->getHasRegParm()) {
+ S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << "regparm" << FunctionType::getNameForCallConv(CC_X86FastCall);
+ attr.setInvalid();
+ return true;
}
// Modify the CC from the wrapped function type, wrap it all back, and then
@@ -4739,9 +4737,7 @@ static bool isPermittedNeonBaseType(QualType &Ty,
// Signed poly is mathematically wrong, but has been baked into some ABIs by
// now.
bool IsPolyUnsigned = Triple.getArch() == llvm::Triple::aarch64 ||
- Triple.getArch() == llvm::Triple::aarch64_be ||
- Triple.getArch() == llvm::Triple::arm64 ||
- Triple.getArch() == llvm::Triple::arm64_be;
+ Triple.getArch() == llvm::Triple::aarch64_be;
if (VecKind == VectorType::NeonPolyVector) {
if (IsPolyUnsigned) {
// AArch64 polynomial vectors are unsigned and support poly64.
@@ -4759,9 +4755,7 @@ static bool isPermittedNeonBaseType(QualType &Ty,
// Non-polynomial vector types: the usual suspects are allowed, as well as
// float64_t on AArch64.
bool Is64Bit = Triple.getArch() == llvm::Triple::aarch64 ||
- Triple.getArch() == llvm::Triple::aarch64_be ||
- Triple.getArch() == llvm::Triple::arm64 ||
- Triple.getArch() == llvm::Triple::arm64_be;
+ Triple.getArch() == llvm::Triple::aarch64_be;
if (Is64Bit && BTy->getKind() == BuiltinType::Double)
return true;
@@ -4899,6 +4893,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
case AttributeList::AT_OpenCLGlobalAddressSpace:
case AttributeList::AT_OpenCLLocalAddressSpace:
case AttributeList::AT_OpenCLConstantAddressSpace:
+ case AttributeList::AT_OpenCLGenericAddressSpace:
case AttributeList::AT_AddressSpace:
HandleAddressSpaceTypeAttribute(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
@@ -5098,31 +5093,9 @@ static bool hasVisibleDefinition(Sema &S, NamedDecl *D, NamedDecl **Suggested) {
// If this definition was instantiated from a template, map back to the
// pattern from which it was instantiated.
- //
- // FIXME: There must be a better place for this to live.
if (auto *RD = dyn_cast<CXXRecordDecl>(D)) {
- if (auto *TD = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
- auto From = TD->getInstantiatedFrom();
- if (auto *CTD = From.dyn_cast<ClassTemplateDecl*>()) {
- while (auto *NewCTD = CTD->getInstantiatedFromMemberTemplate()) {
- if (NewCTD->isMemberSpecialization())
- break;
- CTD = NewCTD;
- }
- RD = CTD->getTemplatedDecl();
- } else if (auto *CTPSD = From.dyn_cast<
- ClassTemplatePartialSpecializationDecl *>()) {
- while (auto *NewCTPSD = CTPSD->getInstantiatedFromMember()) {
- if (NewCTPSD->isMemberSpecialization())
- break;
- CTPSD = NewCTPSD;
- }
- RD = CTPSD;
- }
- } else if (isTemplateInstantiation(RD->getTemplateSpecializationKind())) {
- while (auto *NewRD = RD->getInstantiatedFromMemberClass())
- RD = NewRD;
- }
+ if (auto *Pattern = RD->getTemplateInstantiationPattern())
+ RD = Pattern;
D = RD->getDefinition();
} else if (auto *ED = dyn_cast<EnumDecl>(D)) {
while (auto *NewED = ED->getInstantiatedFromMemberEnum())
@@ -5178,14 +5151,6 @@ static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
? S.ImplicitMSInheritanceAttrLoc
: RD->getSourceRange()));
}
-
- if (RD->hasDefinition()) {
- // Assign inheritance models to all of the base classes, because now we can
- // form pointers to members of base classes without calling
- // RequireCompleteType on the pointer to member of the base class type.
- for (const CXXBaseSpecifier &BS : RD->bases())
- assignInheritanceModel(S, BS.getType()->getAsCXXRecordDecl());
- }
}
/// \brief The implementation of RequireCompleteType
@@ -5510,6 +5475,8 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
} else if (const ObjCPropertyRefExpr *PR = dyn_cast<ObjCPropertyRefExpr>(E)) {
if (PR->isExplicitProperty())
return PR->getExplicitProperty()->getType();
+ } else if (auto *PE = dyn_cast<PredefinedExpr>(E)) {
+ return PE->getType();
}
// C++11 [expr.lambda.prim]p18:
@@ -5550,11 +5517,19 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
return T;
}
-QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc) {
+QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc,
+ bool AsUnevaluated) {
ExprResult ER = CheckPlaceholderExpr(E);
if (ER.isInvalid()) return QualType();
E = ER.get();
+ if (AsUnevaluated && ActiveTemplateInstantiations.empty() &&
+ E->HasSideEffects(Context, false)) {
+ // The expression operand for decltype is in an unevaluated expression
+ // context, so side effects could result in unintended consequences.
+ Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
+ }
+
return Context.getDecltypeType(E, getDecltypeForExpr(*this, E));
}
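Illustration of the new warning (the side-effect check is skipped inside template instantiations, per the ActiveTemplateInstantiations.empty() test above):

    int i = 0;
    decltype(i++) j = 0;   // warns: the operand of decltype is unevaluated,
                           // so 'i' is never incremented; the type is 'int'.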
diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h
index 312811d2c008..36abbb624af7 100644
--- a/lib/Sema/TreeTransform.h
+++ b/lib/Sema/TreeTransform.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SEMA_TREETRANSFORM_H
-#define LLVM_CLANG_SEMA_TREETRANSFORM_H
+#ifndef LLVM_CLANG_LIB_SEMA_TREETRANSFORM_H
+#define LLVM_CLANG_LIB_SEMA_TREETRANSFORM_H
#include "TypeLocBuilder.h"
#include "clang/AST/Decl.h"
@@ -327,6 +327,27 @@ public:
/// \returns the transformed OpenMP clause.
OMPClause *TransformOMPClause(OMPClause *S);
+ /// \brief Transform the given attribute.
+ ///
+ /// By default, this routine transforms an attribute by delegating to the
+ /// appropriate TransformXXXAttr function to transform a specific kind
+ /// of attribute. Subclasses may override this function to transform
+ /// attributes using some other mechanism.
+ ///
+ /// \returns the transformed attribute
+ const Attr *TransformAttr(const Attr *S);
+
+/// \brief Transform the specified attribute.
+///
+/// Subclasses should override the transformation of attributes with a pragma
+/// spelling to transform expressions stored within the attribute.
+///
+/// \returns the transformed attribute.
+#define ATTR(X)
+#define PRAGMA_SPELLING_ATTR(X) \
+ const X##Attr *Transform##X##Attr(const X##Attr *R) { return R; }
+#include "clang/Basic/AttrList.inc"
+
/// \brief Transform the given expression.
///
/// By default, this routine transforms an expression by delegating to the
@@ -542,10 +563,17 @@ public:
QualType Transform##CLASS##Type(TypeLocBuilder &TLB, CLASS##TypeLoc T);
#include "clang/AST/TypeLocNodes.def"
+ template<typename Fn>
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals);
+ unsigned ThisTypeQuals,
+ Fn TransformExceptionSpec);
+
+ bool TransformExceptionSpec(SourceLocation Loc,
+ FunctionProtoType::ExceptionSpecInfo &ESI,
+ SmallVectorImpl<QualType> &Exceptions,
+ bool &Changed);
StmtResult TransformSEHHandler(Stmt *Handler);
@@ -560,10 +588,9 @@ public:
TemplateName Template,
CXXScopeSpec &SS);
- QualType
- TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
- DependentTemplateSpecializationTypeLoc TL,
- NestedNameSpecifierLoc QualifierLoc);
+ QualType TransformDependentTemplateSpecializationType(
+ TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL,
+ NestedNameSpecifierLoc QualifierLoc);
/// \brief Transforms the parameters of a function type into the
/// given vectors.
@@ -1665,10 +1692,8 @@ public:
}
StmtResult RebuildSEHTryStmt(bool IsCXXTry, SourceLocation TryLoc,
- Stmt *TryBlock, Stmt *Handler, int HandlerIndex,
- int HandlerParentIndex) {
- return getSema().ActOnSEHTryBlock(IsCXXTry, TryLoc, TryBlock, Handler,
- HandlerIndex, HandlerParentIndex);
+ Stmt *TryBlock, Stmt *Handler) {
+ return getSema().ActOnSEHTryBlock(IsCXXTry, TryLoc, TryBlock, Handler);
}
StmtResult RebuildSEHExceptStmt(SourceLocation Loc, Expr *FilterExpr,
@@ -1680,6 +1705,15 @@ public:
return getSema().ActOnSEHFinallyBlock(Loc, Block);
}
+ /// \brief Build a new predefined expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildPredefinedExpr(SourceLocation Loc,
+ PredefinedExpr::IdentType IT) {
+ return getSema().BuildPredefinedExpr(Loc, IT);
+ }
+
/// \brief Build a new expression that references a declaration.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2757,6 +2791,27 @@ public:
return getSema().CheckPackExpansion(Pattern, EllipsisLoc, NumExpansions);
}
+ /// \brief Build a new C++1z fold-expression.
+ ///
+ /// By default, performs semantic analysis in order to build a new fold
+ /// expression.
+ ExprResult RebuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
+ BinaryOperatorKind Operator,
+ SourceLocation EllipsisLoc, Expr *RHS,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXFoldExpr(LParenLoc, LHS, Operator, EllipsisLoc,
+ RHS, RParenLoc);
+ }
+
+ /// \brief Build an empty C++1z fold-expression with the given operator.
+ ///
+ /// By default, produces the fallback value for the fold-expression, or
+ /// produces an error if there is no fallback value.
+ ExprResult RebuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
+ BinaryOperatorKind Operator) {
+ return getSema().BuildEmptyCXXFoldExpr(EllipsisLoc, Operator);
+ }
+
/// \brief Build a new atomic operation expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -3103,6 +3158,14 @@ TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
SS.MakeGlobal(SemaRef.Context, Q.getBeginLoc());
break;
+ case NestedNameSpecifier::Super: {
+ CXXRecordDecl *RD =
+ cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
+ SourceLocation(), QNNS->getAsRecordDecl()));
+ SS.MakeSuper(SemaRef.Context, RD, Q.getBeginLoc(), Q.getEndLoc());
+ break;
+ }
+
case NestedNameSpecifier::TypeSpecWithTemplate:
case NestedNameSpecifier::TypeSpec: {
TypeLoc TL = TransformTypeInObjectScope(Q.getTypeLoc(), ObjectType,
@@ -4515,15 +4578,20 @@ template<typename Derived>
QualType
TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL) {
- return getDerived().TransformFunctionProtoType(TLB, TL, nullptr, 0);
-}
-
-template<typename Derived>
-QualType
-TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
- FunctionProtoTypeLoc TL,
- CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals) {
+ SmallVector<QualType, 4> ExceptionStorage;
+ TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
+ return getDerived().TransformFunctionProtoType(
+ TLB, TL, nullptr, 0,
+ [&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
+ return This->TransformExceptionSpec(TL.getBeginLoc(), ESI,
+ ExceptionStorage, Changed);
+ });
+}
+
+template<typename Derived> template<typename Fn>
+QualType TreeTransform<Derived>::TransformFunctionProtoType(
+ TypeLocBuilder &TLB, FunctionProtoTypeLoc TL, CXXRecordDecl *ThisContext,
+ unsigned ThisTypeQuals, Fn TransformExceptionSpec) {
// Transform the parameters and return type.
//
// We are required to instantiate the params and return type in source order.
@@ -4568,15 +4636,21 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
return QualType();
}
- // FIXME: Need to transform the exception-specification too.
+ FunctionProtoType::ExtProtoInfo EPI = T->getExtProtoInfo();
+
+ bool EPIChanged = false;
+ if (TransformExceptionSpec(EPI.ExceptionSpec, EPIChanged))
+ return QualType();
+
+ // FIXME: Need to transform ConsumedParameters for variadic template
+ // expansion.
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || ResultType != T->getReturnType() ||
T->getNumParams() != ParamTypes.size() ||
!std::equal(T->param_type_begin(), T->param_type_end(),
- ParamTypes.begin())) {
- Result = getDerived().RebuildFunctionProtoType(ResultType, ParamTypes,
- T->getExtProtoInfo());
+ ParamTypes.begin()) || EPIChanged) {
+ Result = getDerived().RebuildFunctionProtoType(ResultType, ParamTypes, EPI);
if (Result.isNull())
return QualType();
}
@@ -4593,6 +4667,107 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
}
template<typename Derived>
+bool TreeTransform<Derived>::TransformExceptionSpec(
+ SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI,
+ SmallVectorImpl<QualType> &Exceptions, bool &Changed) {
+ assert(ESI.Type != EST_Uninstantiated && ESI.Type != EST_Unevaluated);
+
+ // Instantiate a dynamic noexcept expression, if any.
+ if (ESI.Type == EST_ComputedNoexcept) {
+ EnterExpressionEvaluationContext Unevaluated(getSema(),
+ Sema::ConstantEvaluated);
+ ExprResult NoexceptExpr = getDerived().TransformExpr(ESI.NoexceptExpr);
+ if (NoexceptExpr.isInvalid())
+ return true;
+
+ NoexceptExpr = getSema().CheckBooleanCondition(
+ NoexceptExpr.get(), NoexceptExpr.get()->getLocStart());
+ if (NoexceptExpr.isInvalid())
+ return true;
+
+ if (!NoexceptExpr.get()->isValueDependent()) {
+ NoexceptExpr = getSema().VerifyIntegerConstantExpression(
+ NoexceptExpr.get(), nullptr,
+ diag::err_noexcept_needs_constant_expression,
+ /*AllowFold*/false);
+ if (NoexceptExpr.isInvalid())
+ return true;
+ }
+
+ if (ESI.NoexceptExpr != NoexceptExpr.get())
+ Changed = true;
+ ESI.NoexceptExpr = NoexceptExpr.get();
+ }
+
+ if (ESI.Type != EST_Dynamic)
+ return false;
+
+ // Instantiate a dynamic exception specification's type.
+ for (QualType T : ESI.Exceptions) {
+ if (const PackExpansionType *PackExpansion =
+ T->getAs<PackExpansionType>()) {
+ Changed = true;
+
+ // We have a pack expansion. Instantiate it.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(PackExpansion->getPattern(),
+ Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can and
+ // should
+ // be expanded.
+ bool Expand = false;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions = PackExpansion->getNumExpansions();
+ // FIXME: Track the location of the ellipsis (and track source location
+ // information for the types in the exception specification in general).
+ if (getDerived().TryExpandParameterPacks(
+ Loc, SourceRange(), Unexpanded, Expand,
+ RetainExpansion, NumExpansions))
+ return true;
+
+ if (!Expand) {
+ // We can't expand this pack expansion into separate arguments yet;
+ // just substitute into the pattern and create a new pack expansion
+ // type.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ QualType U = getDerived().TransformType(PackExpansion->getPattern());
+ if (U.isNull())
+ return true;
+
+ U = SemaRef.Context.getPackExpansionType(U, NumExpansions);
+ Exceptions.push_back(U);
+ continue;
+ }
+
+ // Substitute into the pack expansion pattern for each slice of the
+ // pack.
+ for (unsigned ArgIdx = 0; ArgIdx != *NumExpansions; ++ArgIdx) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), ArgIdx);
+
+ QualType U = getDerived().TransformType(PackExpansion->getPattern());
+ if (U.isNull() || SemaRef.CheckSpecifiedExceptionType(U, Loc))
+ return true;
+
+ Exceptions.push_back(U);
+ }
+ } else {
+ QualType U = getDerived().TransformType(T);
+ if (U.isNull() || SemaRef.CheckSpecifiedExceptionType(U, Loc))
+ return true;
+ if (T != U)
+ Changed = true;
+
+ Exceptions.push_back(U);
+ }
+ }
+
+ ESI.Exceptions = Exceptions;
+ return false;
+}
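The kinds of exception specifications this routine now instantiates, for illustration only (dynamic exception specifications are deprecated in C++11 and later):

    template <typename... Ts> struct Thrower {
      // Dynamic exception specification whose list is a pack expansion.
      void f() throw(Ts...);

      // Computed noexcept whose condition mentions the pack.
      void g() noexcept(sizeof...(Ts) < 2);
    };

    Thrower<int, float> t;   // f() ends up as throw(int, float),
                             // g() as noexcept(false)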
+
+template<typename Derived>
QualType TreeTransform<Derived>::TransformFunctionNoProtoType(
TypeLocBuilder &TLB,
FunctionNoProtoTypeLoc TL) {
@@ -5147,8 +5322,8 @@ TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
if (const TemplateSpecializationType *TST =
NamedT->getAs<TemplateSpecializationType>()) {
TemplateName Template = TST->getTemplateName();
- if (TypeAliasTemplateDecl *TAT =
- dyn_cast_or_null<TypeAliasTemplateDecl>(Template.getAsTemplateDecl())) {
+ if (TypeAliasTemplateDecl *TAT = dyn_cast_or_null<TypeAliasTemplateDecl>(
+ Template.getAsTemplateDecl())) {
SemaRef.Diag(TL.getNamedTypeLoc().getBeginLoc(),
diag::err_tag_reference_non_tag) << 4;
SemaRef.Diag(TAT->getLocation(), diag::note_declared_at);
@@ -5529,19 +5704,43 @@ TreeTransform<Derived>::TransformLabelStmt(LabelStmt *S) {
SubStmt.get());
}
-template<typename Derived>
-StmtResult
-TreeTransform<Derived>::TransformAttributedStmt(AttributedStmt *S) {
+template <typename Derived>
+const Attr *TreeTransform<Derived>::TransformAttr(const Attr *R) {
+ if (!R)
+ return R;
+
+ switch (R->getKind()) {
+// Transform attributes with a pragma spelling by calling TransformXXXAttr.
+#define ATTR(X)
+#define PRAGMA_SPELLING_ATTR(X) \
+ case attr::X: \
+ return getDerived().Transform##X##Attr(cast<X##Attr>(R));
+#include "clang/Basic/AttrList.inc"
+ default:
+ return R;
+ }
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformAttributedStmt(AttributedStmt *S) {
+ bool AttrsChanged = false;
+ SmallVector<const Attr *, 1> Attrs;
+
+ // Visit attributes and keep track if any are transformed.
+ for (const auto *I : S->getAttrs()) {
+ const Attr *R = getDerived().TransformAttr(I);
+ AttrsChanged |= (I != R);
+ Attrs.push_back(R);
+ }
+
StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
if (SubStmt.isInvalid())
return StmtError();
- // TODO: transform attributes
- if (SubStmt.get() == S->getSubStmt() /* && attrs are the same */)
+ if (SubStmt.get() == S->getSubStmt() && !AttrsChanged)
return S;
- return getDerived().RebuildAttributedStmt(S->getAttrLoc(),
- S->getAttrs(),
+ return getDerived().RebuildAttributedStmt(S->getAttrLoc(), Attrs,
SubStmt.get());
}
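Illustrative use from the source side, assuming '#pragma clang loop' accepts the template-dependent argument N here, which is the situation this attribute transform is meant to serve:

    template <int N> void scale(float *p, int n) {
    #pragma clang loop unroll_count(N)   // the attribute stores an expression
      for (int i = 0; i < n; ++i)        // that must be transformed when the
        p[i] *= 2.0f;                    // template is instantiated
    }
    template void scale<8>(float *, int);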
@@ -5824,7 +6023,8 @@ TreeTransform<Derived>::TransformBreakStmt(BreakStmt *S) {
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformReturnStmt(ReturnStmt *S) {
- ExprResult Result = getDerived().TransformExpr(S->getRetValue());
+ ExprResult Result = getDerived().TransformInitializer(S->getRetValue(),
+ /*NotCopyInit*/false);
if (Result.isInvalid())
return StmtError();
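Transforming the returned value as an initializer rather than a plain expression matters for cases like a braced-init-list in a return statement of a template (illustration, not part of the patch):

    #include <utility>

    template <typename T> std::pair<T, T> dup(T v) {
      return {v, v};   // a braced-init-list has no type of its own and must
                       // be transformed as an initializer, not an expression
    }

    auto p = dup(3);   // std::pair<int, int>{3, 3}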
@@ -6380,9 +6580,8 @@ StmtResult TreeTransform<Derived>::TransformSEHTryStmt(SEHTryStmt *S) {
Handler.get() == S->getHandler())
return S;
- return getDerived().RebuildSEHTryStmt(
- S->getIsCXXTry(), S->getTryLoc(), TryBlock.get(), Handler.get(),
- S->getHandlerIndex(), S->getHandlerParentIndex());
+ return getDerived().RebuildSEHTryStmt(S->getIsCXXTry(), S->getTryLoc(),
+ TryBlock.get(), Handler.get());
}
template <typename Derived>
@@ -6504,6 +6703,17 @@ TreeTransform<Derived>::TransformOMPForDirective(OMPForDirective *D) {
template <typename Derived>
StmtResult
+TreeTransform<Derived>::TransformOMPForSimdDirective(OMPForSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_for_simd, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
TreeTransform<Derived>::TransformOMPSectionsDirective(OMPSectionsDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_sections, DirName, nullptr,
@@ -6568,6 +6778,17 @@ StmtResult TreeTransform<Derived>::TransformOMPParallelForDirective(
}
template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPParallelForSimdDirective(
+ OMPParallelForSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_for_simd, DirName,
+ nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
DeclarationNameInfo DirName;
@@ -6633,6 +6854,50 @@ TreeTransform<Derived>::TransformOMPFlushDirective(OMPFlushDirective *D) {
return Res;
}
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPOrderedDirective(OMPOrderedDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_ordered, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPAtomicDirective(OMPAtomicDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_atomic, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTargetDirective(OMPTargetDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_target, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTeamsDirective(OMPTeamsDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_teams, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
//===----------------------------------------------------------------------===//
// OpenMP clause transformation
//===----------------------------------------------------------------------===//
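The newly handled directives in source form, for illustration (this assumes the corresponding OpenMP 4.0 sema and codegen support in this release; compile with -fopenmp):

    template <typename T> T fetch_and_add(T &x, T v) {
      T old;
    #pragma omp atomic capture
      { old = x; x += v; }
      return old;
    }

    template <typename T> void axpy(int n, T a, const T *x, T *y) {
    #pragma omp parallel for simd
      for (int i = 0; i < n; ++i)
        y[i] += a * x[i];
    }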
@@ -6740,6 +7005,39 @@ TreeTransform<Derived>::TransformOMPMergeableClause(OMPMergeableClause *C) {
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPReadClause(OMPReadClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPWriteClause(OMPWriteClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPUpdateClause(OMPUpdateClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPCaptureClause(OMPCaptureClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPSeqCstClause(OMPSeqCstClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPPrivateClause(OMPPrivateClause *C) {
llvm::SmallVector<Expr *, 16> Vars;
@@ -6912,7 +7210,11 @@ OMPClause *TreeTransform<Derived>::TransformOMPFlushClause(OMPFlushClause *C) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformPredefinedExpr(PredefinedExpr *E) {
- return E;
+ if (!E->isTypeDependent())
+ return E;
+
+ return getDerived().RebuildPredefinedExpr(E->getLocation(),
+ E->getIdentType());
}
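Why a predefined expression can need rebuilding at all, for illustration: inside a template its type is a const char array whose bound is only known after substitution, so the expression is type-dependent and is only returned unchanged when it is not.

    template <typename T> const char *who() { return __PRETTY_FUNCTION__; }

    // who<int>() and who<double>() produce different strings, hence
    // differently sized arrays; within the template the PredefinedExpr is
    // type-dependent and is rebuilt (RebuildPredefinedExpr) on instantiation.
    const char *a = who<int>();
    const char *b = who<double>();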
template<typename Derived>
@@ -7161,6 +7463,12 @@ TreeTransform<Derived>::TransformOpaqueValueExpr(OpaqueValueExpr *E) {
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformTypoExpr(TypoExpr *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
// Rebuild the syntactic form. The original syntactic form has
// opaque-value expressions in it, so strip those away and rebuild
@@ -7873,15 +8181,11 @@ TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) {
Type == E->getTypeInfoAsWritten() &&
SubExpr.get() == E->getSubExpr())
return E;
- return getDerived().RebuildCXXNamedCastExpr(E->getOperatorLoc(),
- E->getStmtClass(),
- E->getAngleBrackets().getBegin(),
- Type,
- E->getAngleBrackets().getEnd(),
- // FIXME. this should be '(' location
- E->getAngleBrackets().getEnd(),
- SubExpr.get(),
- E->getRParenLoc());
+ return getDerived().RebuildCXXNamedCastExpr(
+ E->getOperatorLoc(), E->getStmtClass(), E->getAngleBrackets().getBegin(),
+ Type, E->getAngleBrackets().getEnd(),
+ // FIXME. this should be '(' location
+ E->getAngleBrackets().getEnd(), SubExpr.get(), E->getRParenLoc());
}
template<typename Derived>
@@ -8786,113 +9090,68 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
-
- // Transform any init-capture expressions before entering the scope of the
+ // Transform any init-capture expressions before entering the scope of the
// lambda body, because they are not semantically within that scope.
SmallVector<InitCaptureInfoTy, 8> InitCaptureExprsAndTypes;
InitCaptureExprsAndTypes.resize(E->explicit_capture_end() -
E->explicit_capture_begin());
-
for (LambdaExpr::capture_iterator C = E->capture_begin(),
- CEnd = E->capture_end();
- C != CEnd; ++C) {
+ CEnd = E->capture_end();
+ C != CEnd; ++C) {
if (!C->isInitCapture())
continue;
- EnterExpressionEvaluationContext EEEC(getSema(),
- Sema::PotentiallyEvaluated);
+ EnterExpressionEvaluationContext EEEC(getSema(),
+ Sema::PotentiallyEvaluated);
ExprResult NewExprInitResult = getDerived().TransformInitializer(
C->getCapturedVar()->getInit(),
C->getCapturedVar()->getInitStyle() == VarDecl::CallInit);
-
+
if (NewExprInitResult.isInvalid())
return ExprError();
Expr *NewExprInit = NewExprInitResult.get();
-
+
VarDecl *OldVD = C->getCapturedVar();
- QualType NewInitCaptureType =
- getSema().performLambdaInitCaptureInitialization(C->getLocation(),
- OldVD->getType()->isReferenceType(), OldVD->getIdentifier(),
+ QualType NewInitCaptureType =
+ getSema().performLambdaInitCaptureInitialization(C->getLocation(),
+ OldVD->getType()->isReferenceType(), OldVD->getIdentifier(),
NewExprInit);
NewExprInitResult = NewExprInit;
InitCaptureExprsAndTypes[C - E->capture_begin()] =
std::make_pair(NewExprInitResult, NewInitCaptureType);
-
}
LambdaScopeInfo *LSI = getSema().PushLambdaScope();
+ Sema::FunctionScopeRAII FuncScopeCleanup(getSema());
+
// Transform the template parameters, and add them to the current
// instantiation scope. The null case is handled correctly.
LSI->GLTemplateParameterList = getDerived().TransformTemplateParameterList(
E->getTemplateParameterList());
- // Check to see if the TypeSourceInfo of the call operator needs to
- // be transformed, and if so do the transformation in the
- // CurrentInstantiationScope.
-
- TypeSourceInfo *OldCallOpTSI = E->getCallOperator()->getTypeSourceInfo();
- FunctionProtoTypeLoc OldCallOpFPTL =
- OldCallOpTSI->getTypeLoc().getAs<FunctionProtoTypeLoc>();
+ // Transform the type of the original lambda's call operator.
+ // The transformation MUST be done in the CurrentInstantiationScope since
+ // it introduces a mapping of the original to the newly created
+ // transformed parameters.
TypeSourceInfo *NewCallOpTSI = nullptr;
-
- const bool CallOpWasAlreadyTransformed =
- getDerived().AlreadyTransformed(OldCallOpTSI->getType());
-
- // Use the Old Call Operator's TypeSourceInfo if it is already transformed.
- if (CallOpWasAlreadyTransformed)
- NewCallOpTSI = OldCallOpTSI;
- else {
- // Transform the TypeSourceInfo of the Original Lambda's Call Operator.
- // The transformation MUST be done in the CurrentInstantiationScope since
- // it introduces a mapping of the original to the newly created
- // transformed parameters.
+ {
+ TypeSourceInfo *OldCallOpTSI = E->getCallOperator()->getTypeSourceInfo();
+ FunctionProtoTypeLoc OldCallOpFPTL =
+ OldCallOpTSI->getTypeLoc().getAs<FunctionProtoTypeLoc>();
TypeLocBuilder NewCallOpTLBuilder;
- QualType NewCallOpType = TransformFunctionProtoType(NewCallOpTLBuilder,
- OldCallOpFPTL,
- nullptr, 0);
+ SmallVector<QualType, 4> ExceptionStorage;
+ TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
+ QualType NewCallOpType = TransformFunctionProtoType(
+ NewCallOpTLBuilder, OldCallOpFPTL, nullptr, 0,
+ [&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
+ return This->TransformExceptionSpec(OldCallOpFPTL.getBeginLoc(), ESI,
+ ExceptionStorage, Changed);
+ });
+ if (NewCallOpType.isNull())
+ return ExprError();
NewCallOpTSI = NewCallOpTLBuilder.getTypeSourceInfo(getSema().Context,
NewCallOpType);
}
- // Extract the ParmVarDecls from the NewCallOpTSI and add them to
- // the vector below - this will be used to synthesize the
- // NewCallOperator. Additionally, add the parameters of the untransformed
- // lambda call operator to the CurrentInstantiationScope.
- SmallVector<ParmVarDecl *, 4> Params;
- {
- FunctionProtoTypeLoc NewCallOpFPTL =
- NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>();
- ParmVarDecl **NewParamDeclArray = NewCallOpFPTL.getParmArray();
- const unsigned NewNumArgs = NewCallOpFPTL.getNumParams();
-
- for (unsigned I = 0; I < NewNumArgs; ++I) {
- // If this call operator's type does not require transformation,
- // the parameters do not get added to the current instantiation scope,
- // - so ADD them! This allows the following to compile when the enclosing
- // template is specialized and the entire lambda expression has to be
- // transformed.
- // template<class T> void foo(T t) {
- // auto L = [](auto a) {
- // auto M = [](char b) { <-- note: non-generic lambda
- // auto N = [](auto c) {
- // int x = sizeof(a);
- // x = sizeof(b); <-- specifically this line
- // x = sizeof(c);
- // };
- // };
- // };
- // }
- // foo('a')
- if (CallOpWasAlreadyTransformed)
- getDerived().transformedLocalDecl(NewParamDeclArray[I],
- NewParamDeclArray[I]);
- // Add to Params array, so these parameters can be used to create
- // the newly transformed call operator.
- Params.push_back(NewParamDeclArray[I]);
- }
- }
-
- if (!NewCallOpTSI)
- return ExprError();
// Create the local class that will describe the lambda.
CXXRecordDecl *Class
@@ -8900,19 +9159,21 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
NewCallOpTSI,
/*KnownDependent=*/false,
E->getCaptureDefault());
-
getDerived().transformedLocalDecl(E->getLambdaClass(), Class);
// Build the call operator.
- CXXMethodDecl *NewCallOperator
- = getSema().startLambdaDefinition(Class, E->getIntroducerRange(),
- NewCallOpTSI,
- E->getCallOperator()->getLocEnd(),
- Params);
+ CXXMethodDecl *NewCallOperator = getSema().startLambdaDefinition(
+ Class, E->getIntroducerRange(), NewCallOpTSI,
+ E->getCallOperator()->getLocEnd(),
+ NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams());
LSI->CallOperator = NewCallOperator;
getDerived().transformAttrs(E->getCallOperator(), NewCallOperator);
+ // TransformLambdaScope will manage the function scope, so we can disable the
+ // cleanup.
+ FuncScopeCleanup.disable();
+
return getDerived().TransformLambdaScope(E, NewCallOperator,
InitCaptureExprsAndTypes);
}
@@ -8954,6 +9215,10 @@ TreeTransform<Derived>::TransformLambdaScope(LambdaExpr *E,
getSema().CheckCXXThisCapture(C->getLocation(), C->isExplicit());
continue;
}
+ // The captured expression will be recaptured when the captured
+ // variables are rebuilt.
+ if (C->capturesVLAType())
+ continue;
// Rebuild init-captures, including the implied field declaration.
if (C->isInitCapture()) {
@@ -9033,7 +9298,7 @@ TreeTransform<Derived>::TransformLambdaScope(LambdaExpr *E,
VarDecl *CapturedVar
= cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
C->getCapturedVar()));
- if (!CapturedVar) {
+ if (!CapturedVar || CapturedVar->isInvalidDecl()) {
Invalid = true;
continue;
}
@@ -9398,6 +9663,128 @@ TreeTransform<Derived>::TransformMaterializeTemporaryExpr(
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
+ Expr *Pattern = E->getPattern();
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions;
+ if (getDerived().TryExpandParameterPacks(E->getEllipsisLoc(),
+ Pattern->getSourceRange(),
+ Unexpanded,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return true;
+
+ if (!Expand) {
+ // Do not expand any packs here, just transform and rebuild a fold
+ // expression.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+
+ ExprResult LHS =
+ E->getLHS() ? getDerived().TransformExpr(E->getLHS()) : ExprResult();
+ if (LHS.isInvalid())
+ return true;
+
+ ExprResult RHS =
+ E->getRHS() ? getDerived().TransformExpr(E->getRHS()) : ExprResult();
+ if (RHS.isInvalid())
+ return true;
+
+ if (!getDerived().AlwaysRebuild() &&
+ LHS.get() == E->getLHS() && RHS.get() == E->getRHS())
+ return E;
+
+ return getDerived().RebuildCXXFoldExpr(
+ E->getLocStart(), LHS.get(), E->getOperator(), E->getEllipsisLoc(),
+ RHS.get(), E->getLocEnd());
+ }
+
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern. Do so.
+ ExprResult Result = getDerived().TransformExpr(E->getInit());
+ if (Result.isInvalid())
+ return true;
+ bool LeftFold = E->isLeftFold();
+
+ // If we're retaining an expansion for a right fold, it is the innermost
+ // component and takes the init (if any).
+ if (!LeftFold && RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+
+ Result = getDerived().RebuildCXXFoldExpr(
+ E->getLocStart(), Out.get(), E->getOperator(), E->getEllipsisLoc(),
+ Result.get(), E->getLocEnd());
+ if (Result.isInvalid())
+ return true;
+ }
+
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(
+ getSema(), LeftFold ? I : *NumExpansions - I - 1);
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+
+ if (Out.get()->containsUnexpandedParameterPack()) {
+ // We still have a pack; retain a pack expansion for this slice.
+ Result = getDerived().RebuildCXXFoldExpr(
+ E->getLocStart(),
+ LeftFold ? Result.get() : Out.get(),
+ E->getOperator(), E->getEllipsisLoc(),
+ LeftFold ? Out.get() : Result.get(),
+ E->getLocEnd());
+ } else if (Result.isUsable()) {
+ // We've got down to a single element; build a binary operator.
+ Result = getDerived().RebuildBinaryOperator(
+ E->getEllipsisLoc(), E->getOperator(),
+ LeftFold ? Result.get() : Out.get(),
+ LeftFold ? Out.get() : Result.get());
+ } else
+ Result = Out;
+
+ if (Result.isInvalid())
+ return true;
+ }
+
+ // If we're retaining an expansion for a left fold, it is the outermost
+ // component and takes the complete expansion so far as its init (if any).
+ if (LeftFold && RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+
+ Result = getDerived().RebuildCXXFoldExpr(
+ E->getLocStart(), Result.get(),
+ E->getOperator(), E->getEllipsisLoc(),
+ Out.get(), E->getLocEnd());
+ if (Result.isInvalid())
+ return true;
+ }
+
+ // If we had no init and an empty pack, and we're not retaining an expansion,
+ // then produce a fallback value or error.
+ if (Result.isUnset())
+ return getDerived().RebuildEmptyCXXFoldExpr(E->getEllipsisLoc(),
+ E->getOperator());
+
+ return Result;
+}
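For reference, a minimal sketch of what the elementwise expansion above produces for a left fold (names are illustrative, not taken from the patch):

    template <typename... Ts> auto sum(Ts... args) {
      return (0 + ... + args);            // binary left fold over the pack
    }
    // Instantiated with args = {a, b, c}, the loop rebuilds the body as the
    // nested binary operators (((0 + a) + b) + c), one RebuildBinaryOperator
    // call per element; an empty pack with no init value instead falls
    // through to RebuildEmptyCXXFoldExpr.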
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformCXXStdInitializerListExpr(
CXXStdInitializerListExpr *E) {
return getDerived().TransformExpr(E->getSubExpr());
@@ -10292,7 +10679,7 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
SourceLocation RBrace;
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Callee)) {
- DeclarationNameLoc &NameLoc = DRE->getNameInfo().getInfo();
+ DeclarationNameLoc NameLoc = DRE->getNameInfo().getInfo();
LBrace = SourceLocation::getFromRawEncoding(
NameLoc.CXXOperatorName.BeginOpNameLoc);
RBrace = SourceLocation::getFromRawEncoding(
@@ -10348,9 +10735,16 @@ TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
// The scope type is now known to be a valid nested name specifier
// component. Tack it on to the end of the nested name specifier.
- if (ScopeType)
- SS.Extend(SemaRef.Context, SourceLocation(),
- ScopeType->getTypeLoc(), CCLoc);
+ if (ScopeType) {
+ if (!ScopeType->getType()->getAs<TagType>()) {
+ getSema().Diag(ScopeType->getTypeLoc().getBeginLoc(),
+ diag::err_expected_class_or_namespace)
+ << ScopeType->getType() << getSema().getLangOpts().CPlusPlus;
+ return ExprError();
+ }
+ SS.Extend(SemaRef.Context, SourceLocation(), ScopeType->getTypeLoc(),
+ CCLoc);
+ }
SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
return getSema().BuildMemberReferenceExpr(Base, BaseType,
@@ -10397,4 +10791,4 @@ TreeTransform<Derived>::TransformCapturedStmt(CapturedStmt *S) {
} // end namespace clang
-#endif // LLVM_CLANG_SEMA_TREETRANSFORM_H
+#endif
diff --git a/lib/Sema/TypeLocBuilder.h b/lib/Sema/TypeLocBuilder.h
index c3f874e1a854..82844b391467 100644
--- a/lib/Sema/TypeLocBuilder.h
+++ b/lib/Sema/TypeLocBuilder.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SEMA_TYPELOCBUILDER_H
-#define LLVM_CLANG_SEMA_TYPELOCBUILDER_H
+#ifndef LLVM_CLANG_LIB_SEMA_TYPELOCBUILDER_H
+#define LLVM_CLANG_LIB_SEMA_TYPELOCBUILDER_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/TypeLoc.h"
diff --git a/lib/Serialization/ASTCommon.cpp b/lib/Serialization/ASTCommon.cpp
index ad046ffa277a..13393225b60e 100644
--- a/lib/Serialization/ASTCommon.cpp
+++ b/lib/Serialization/ASTCommon.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "ASTCommon.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Serialization/ASTDeserializationListener.h"
@@ -150,7 +151,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
switch (static_cast<Decl::Kind>(Kind)) {
case Decl::TranslationUnit: // Special case of a "merged" declaration.
case Decl::Namespace:
- case Decl::NamespaceAlias: // FIXME: Not yet redeclarable, but will be.
+ case Decl::NamespaceAlias:
case Decl::Typedef:
case Decl::TypeAlias:
case Decl::Enum:
@@ -188,8 +189,6 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::MSProperty:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
- case Decl::ImplicitParam:
- case Decl::ParmVar:
case Decl::NonTypeTemplateParm:
case Decl::TemplateTemplateParm:
case Decl::Using:
@@ -212,7 +211,20 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::Import:
case Decl::OMPThreadPrivate:
return false;
+
+ // These indirectly derive from Redeclarable<T> but are not actually
+ // redeclarable.
+ case Decl::ImplicitParam:
+ case Decl::ParmVar:
+ return false;
}
llvm_unreachable("Unhandled declaration kind");
}
+
+bool serialization::needsAnonymousDeclarationNumber(const NamedDecl *D) {
+ if (D->getDeclName() || !isa<CXXRecordDecl>(D->getLexicalDeclContext()))
+ return false;
+ return isa<TagDecl>(D) || isa<FieldDecl>(D);
+}
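A short sketch of the declarations the new predicate flags (the struct is illustrative; that the number is used to match anonymous members across module files is an assumption drawn from the surrounding serialization code):

    struct Outer {
      struct { int x; } a;  // unnamed TagDecl lexically inside a CXXRecordDecl
      union { int y; };     // anonymous union: unnamed TagDecl plus FieldDecl
    };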
+
diff --git a/lib/Serialization/ASTCommon.h b/lib/Serialization/ASTCommon.h
index c7669749260b..38a0ff5fbd4e 100644
--- a/lib/Serialization/ASTCommon.h
+++ b/lib/Serialization/ASTCommon.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SERIALIZATION_LIB_AST_COMMON_H
-#define LLVM_CLANG_SERIALIZATION_LIB_AST_COMMON_H
+#ifndef LLVM_CLANG_LIB_SERIALIZATION_ASTCOMMON_H
+#define LLVM_CLANG_LIB_SERIALIZATION_ASTCOMMON_H
#include "clang/AST/ASTContext.h"
#include "clang/Serialization/ASTBitCodes.h"
@@ -25,14 +25,15 @@ enum DeclUpdateKind {
UPD_CXX_ADDED_IMPLICIT_MEMBER,
UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
UPD_CXX_ADDED_ANONYMOUS_NAMESPACE,
+ UPD_CXX_ADDED_FUNCTION_DEFINITION,
UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER,
- UPD_CXX_INSTANTIATED_FUNCTION_DEFINITION,
UPD_CXX_INSTANTIATED_CLASS_DEFINITION,
UPD_CXX_RESOLVED_EXCEPTION_SPEC,
UPD_CXX_DEDUCED_RETURN_TYPE,
UPD_DECL_MARKED_USED,
UPD_MANGLING_NUMBER,
- UPD_STATIC_LOCAL_NUMBER
+ UPD_STATIC_LOCAL_NUMBER,
+ UPD_DECL_MARKED_OPENMP_THREADPRIVATE
};
TypeIdx TypeIdxFromBuiltin(const BuiltinType *BT);
@@ -80,6 +81,10 @@ const DeclContext *getDefinitiveDeclContext(const DeclContext *DC);
/// \brief Determine whether the given declaration kind is redeclarable.
bool isRedeclarableDeclKind(unsigned Kind);
+/// \brief Determine whether the given declaration needs an anonymous
+/// declaration number.
+bool needsAnonymousDeclarationNumber(const NamedDecl *D);
+
} // namespace serialization
} // namespace clang
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index ae41654904c6..416164ebb7d2 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -80,10 +80,14 @@ void ChainedASTReaderListener::ReadModuleMapFile(StringRef ModuleMapPath) {
First->ReadModuleMapFile(ModuleMapPath);
Second->ReadModuleMapFile(ModuleMapPath);
}
-bool ChainedASTReaderListener::ReadLanguageOptions(const LangOptions &LangOpts,
- bool Complain) {
- return First->ReadLanguageOptions(LangOpts, Complain) ||
- Second->ReadLanguageOptions(LangOpts, Complain);
+bool
+ChainedASTReaderListener::ReadLanguageOptions(const LangOptions &LangOpts,
+ bool Complain,
+ bool AllowCompatibleDifferences) {
+ return First->ReadLanguageOptions(LangOpts, Complain,
+ AllowCompatibleDifferences) ||
+ Second->ReadLanguageOptions(LangOpts, Complain,
+ AllowCompatibleDifferences);
}
bool
ChainedASTReaderListener::ReadTargetOptions(const TargetOptions &TargetOpts,
@@ -155,11 +159,14 @@ ASTReaderListener::~ASTReaderListener() {}
/// language options.
///
/// \param Diags If non-NULL, diagnostics will be emitted via this engine.
+/// \param AllowCompatibleDifferences If true, differences between compatible
+/// language options will be permitted.
///
/// \returns true if the language options mismatch, false otherwise.
static bool checkLanguageOptions(const LangOptions &LangOpts,
const LangOptions &ExistingLangOpts,
- DiagnosticsEngine *Diags) {
+ DiagnosticsEngine *Diags,
+ bool AllowCompatibleDifferences = true) {
#define LANGOPT(Name, Bits, Default, Description) \
if (ExistingLangOpts.Name != LangOpts.Name) { \
if (Diags) \
@@ -184,6 +191,14 @@ static bool checkLanguageOptions(const LangOptions &LangOpts,
return true; \
}
+#define COMPATIBLE_LANGOPT(Name, Bits, Default, Description) \
+ if (!AllowCompatibleDifferences) \
+ LANGOPT(Name, Bits, Default, Description)
+
+#define COMPATIBLE_ENUM_LANGOPT(Name, Bits, Default, Description) \
+ if (!AllowCompatibleDifferences) \
+ ENUM_LANGOPT(Name, Bits, Default, Description)
+
#define BENIGN_LANGOPT(Name, Bits, Default, Description)
#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
#include "clang/Basic/LangOptions.def"
@@ -278,10 +293,12 @@ static bool checkTargetOptions(const TargetOptions &TargetOpts,
bool
PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts,
- bool Complain) {
+ bool Complain,
+ bool AllowCompatibleDifferences) {
const LangOptions &ExistingLangOpts = PP.getLangOpts();
return checkLanguageOptions(LangOpts, ExistingLangOpts,
- Complain? &Reader.Diags : nullptr);
+ Complain ? &Reader.Diags : nullptr,
+ AllowCompatibleDifferences);
}
bool PCHValidator::ReadTargetOptions(const TargetOptions &TargetOpts,
@@ -389,14 +406,14 @@ bool PCHValidator::ReadDiagnosticOptions(
// If the original import came from a file explicitly generated by the user,
// don't check the diagnostic mappings.
// FIXME: currently this is approximated by checking whether this is not a
- // module import.
+ // module import of an implicitly-loaded module file.
// Note: ModuleMgr.rbegin() may not be the current module, but it must be in
// the transitive closure of its imports, since unrelated modules cannot be
// imported until after this module finishes validation.
ModuleFile *TopImport = *ModuleMgr.rbegin();
while (!TopImport->ImportedBy.empty())
TopImport = TopImport->ImportedBy[0];
- if (TopImport->Kind != MK_Module)
+ if (TopImport->Kind != MK_ImplicitModule)
return false;
StringRef ModuleName = TopImport->ModuleName;
@@ -543,8 +560,7 @@ static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
continue;
SuggestedPredefines += "#include \"";
- SuggestedPredefines +=
- HeaderSearch::NormalizeDashIncludePath(File, FileMgr);
+ SuggestedPredefines += File;
SuggestedPredefines += "\"\n";
}
@@ -556,8 +572,7 @@ static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
continue;
SuggestedPredefines += "#__include_macros \"";
- SuggestedPredefines +=
- HeaderSearch::NormalizeDashIncludePath(File, FileMgr);
+ SuggestedPredefines += File;
SuggestedPredefines += "\"\n##\n";
}
@@ -635,14 +650,14 @@ ASTSelectorLookupTrait::ReadData(Selector, const unsigned char* d,
Result.ID = Reader.getGlobalSelectorID(
F, endian::readNext<uint32_t, little, unaligned>(d));
- unsigned NumInstanceMethodsAndBits =
- endian::readNext<uint16_t, little, unaligned>(d);
- unsigned NumFactoryMethodsAndBits =
- endian::readNext<uint16_t, little, unaligned>(d);
- Result.InstanceBits = NumInstanceMethodsAndBits & 0x3;
- Result.FactoryBits = NumFactoryMethodsAndBits & 0x3;
- unsigned NumInstanceMethods = NumInstanceMethodsAndBits >> 2;
- unsigned NumFactoryMethods = NumFactoryMethodsAndBits >> 2;
+ unsigned FullInstanceBits = endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned FullFactoryBits = endian::readNext<uint16_t, little, unaligned>(d);
+ Result.InstanceBits = FullInstanceBits & 0x3;
+ Result.InstanceHasMoreThanOneDecl = (FullInstanceBits >> 2) & 0x1;
+ Result.FactoryBits = FullFactoryBits & 0x3;
+ Result.FactoryHasMoreThanOneDecl = (FullFactoryBits >> 2) & 0x1;
+ unsigned NumInstanceMethods = FullInstanceBits >> 3;
+ unsigned NumFactoryMethods = FullFactoryBits >> 3;
// Load instance methods
for (unsigned I = 0; I != NumInstanceMethods; ++I) {
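A worked reading of the new bit layout above, using an invented stored value:

    unsigned FullInstanceBits   = 0x16;                         // 0b10110
    unsigned InstanceBits       = FullInstanceBits & 0x3;       // 0b10 -> 2
    bool     HasMoreThanOneDecl = (FullInstanceBits >> 2) & 1;  // 1
    unsigned NumInstanceMethods = FullInstanceBits >> 3;        // 0b10 -> 2 methods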
@@ -774,15 +789,16 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
DataLen -= 4;
SmallVector<uint32_t, 8> LocalMacroIDs;
if (hasSubmoduleMacros) {
- while (uint32_t LocalMacroID =
- endian::readNext<uint32_t, little, unaligned>(d)) {
+ while (true) {
+ uint32_t LocalMacroID =
+ endian::readNext<uint32_t, little, unaligned>(d);
DataLen -= 4;
+ if (LocalMacroID == 0xdeadbeef) break;
LocalMacroIDs.push_back(LocalMacroID);
}
- DataLen -= 4;
}
- if (F.Kind == MK_Module) {
+ if (F.Kind == MK_ImplicitModule || F.Kind == MK_ExplicitModule) {
// Macro definitions are stored from newest to oldest, so reverse them
// before registering them.
llvm::SmallVector<unsigned, 8> MacroSizes;
@@ -1013,7 +1029,7 @@ void ASTReader::Error(unsigned DiagID,
/// \brief Read the line table in the source manager block.
/// \returns true if there was an error.
bool ASTReader::ParseLineTable(ModuleFile &F,
- SmallVectorImpl<uint64_t> &Record) {
+ const RecordData &Record) {
unsigned Idx = 0;
LineTableInfo &LineTable = SourceMgr.getLineTable();
@@ -1021,10 +1037,7 @@ bool ASTReader::ParseLineTable(ModuleFile &F,
std::map<int, int> FileIDs;
for (int I = 0, N = Record[Idx++]; I != N; ++I) {
// Extract the file name
- unsigned FilenameLen = Record[Idx++];
- std::string Filename(&Record[Idx], &Record[Idx] + FilenameLen);
- Idx += FilenameLen;
- MaybeAddSystemRootToFilename(F, Filename);
+ auto Filename = ReadPath(F, Record, Idx);
FileIDs[I] = LineTable.getLineTableFilenameID(Filename);
}
@@ -1225,9 +1238,9 @@ bool ASTReader::ReadSLocEntry(int ID) {
return true;
}
- llvm::MemoryBuffer *Buffer
+ std::unique_ptr<llvm::MemoryBuffer> Buffer
= llvm::MemoryBuffer::getMemBuffer(Blob.drop_back(1), File->getName());
- SourceMgr.overrideFileContents(File, Buffer);
+ SourceMgr.overrideFileContents(File, std::move(Buffer));
}
break;
@@ -1239,7 +1252,8 @@ bool ASTReader::ReadSLocEntry(int ID) {
SrcMgr::CharacteristicKind
FileCharacter = (SrcMgr::CharacteristicKind)Record[2];
SourceLocation IncludeLoc = ReadSourceLocation(*F, Record[1]);
- if (IncludeLoc.isInvalid() && F->Kind == MK_Module) {
+ if (IncludeLoc.isInvalid() &&
+ (F->Kind == MK_ImplicitModule || F->Kind == MK_ExplicitModule)) {
IncludeLoc = getImportLocation(F);
}
unsigned Code = SLocEntryCursor.ReadCode();
@@ -1252,10 +1266,10 @@ bool ASTReader::ReadSLocEntry(int ID) {
return true;
}
- llvm::MemoryBuffer *Buffer
- = llvm::MemoryBuffer::getMemBuffer(Blob.drop_back(1), Name);
- SourceMgr.createFileID(Buffer, FileCharacter, ID, BaseOffset + Offset,
- IncludeLoc);
+ std::unique_ptr<llvm::MemoryBuffer> Buffer =
+ llvm::MemoryBuffer::getMemBuffer(Blob.drop_back(1), Name);
+ SourceMgr.createFileID(std::move(Buffer), FileCharacter, ID,
+ BaseOffset + Offset, IncludeLoc);
break;
}
@@ -1285,7 +1299,7 @@ std::pair<SourceLocation, StringRef> ASTReader::getModuleImportLoc(int ID) {
// Find which module file this entry lands in.
ModuleFile *M = GlobalSLocEntryMap.find(-ID)->second;
- if (M->Kind != MK_Module)
+ if (M->Kind != MK_ImplicitModule && M->Kind != MK_ExplicitModule)
return std::make_pair(SourceLocation(), "");
// FIXME: Can we map this down to a particular submodule? That would be
@@ -1466,11 +1480,11 @@ ASTReader::getGlobalPreprocessedEntityID(ModuleFile &M, unsigned LocalID) const
unsigned HeaderFileInfoTrait::ComputeHash(internal_key_ref ikey) {
return llvm::hash_combine(ikey.Size, ikey.ModTime);
}
-
+
HeaderFileInfoTrait::internal_key_type
HeaderFileInfoTrait::GetInternalKey(const FileEntry *FE) {
internal_key_type ikey = { FE->getSize(), FE->getModificationTime(),
- FE->getName() };
+ FE->getName(), /*Imported*/false };
return ikey;
}
@@ -1478,14 +1492,24 @@ bool HeaderFileInfoTrait::EqualKey(internal_key_ref a, internal_key_ref b) {
if (a.Size != b.Size || a.ModTime != b.ModTime)
return false;
- if (strcmp(a.Filename, b.Filename) == 0)
+ if (llvm::sys::path::is_absolute(a.Filename) &&
+ strcmp(a.Filename, b.Filename) == 0)
return true;
// Determine whether the actual files are equivalent.
FileManager &FileMgr = Reader.getFileManager();
- const FileEntry *FEA = FileMgr.getFile(a.Filename);
- const FileEntry *FEB = FileMgr.getFile(b.Filename);
- return (FEA && FEA == FEB);
+ auto GetFile = [&](const internal_key_type &Key) -> const FileEntry* {
+ if (!Key.Imported)
+ return FileMgr.getFile(Key.Filename);
+
+ std::string Resolved = Key.Filename;
+ Reader.ResolveImportedPath(M, Resolved);
+ return FileMgr.getFile(Resolved);
+ };
+
+ const FileEntry *FEA = GetFile(a);
+ const FileEntry *FEB = GetFile(b);
+ return FEA && FEA == FEB;
}
std::pair<unsigned, unsigned>
@@ -1503,6 +1527,7 @@ HeaderFileInfoTrait::ReadKey(const unsigned char *d, unsigned) {
ikey.Size = off_t(endian::readNext<uint64_t, little, unaligned>(d));
ikey.ModTime = time_t(endian::readNext<uint64_t, little, unaligned>(d));
ikey.Filename = (const char *)d;
+ ikey.Imported = true;
return ikey;
}
@@ -1542,7 +1567,14 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
FileManager &FileMgr = Reader.getFileManager();
ModuleMap &ModMap =
Reader.getPreprocessor().getHeaderSearchInfo().getModuleMap();
- ModMap.addHeader(Mod, FileMgr.getFile(key.Filename), HFI.getHeaderRole());
+ // FIXME: This information should be propagated through the
+ // SUBMODULE_HEADER etc records rather than from here.
+ // FIXME: We don't ever mark excluded headers.
+ std::string Filename = key.Filename;
+ if (key.Imported)
+ Reader.ResolveImportedPath(M, Filename);
+ Module::Header H = { key.Filename, FileMgr.getFile(Filename) };
+ ModMap.addHeader(Mod, H, HFI.getHeaderRole());
}
}
@@ -1732,10 +1764,12 @@ struct ASTReader::ModuleMacroInfo {
return llvm::makeArrayRef(Overrides + 1, *Overrides);
}
- DefMacroDirective *import(Preprocessor &PP, SourceLocation ImportLoc) const {
+ MacroDirective *import(Preprocessor &PP, SourceLocation ImportLoc) const {
if (!MI)
- return nullptr;
- return PP.AllocateDefMacroDirective(MI, ImportLoc, /*isImported=*/true);
+ return PP.AllocateUndefMacroDirective(ImportLoc, SubModID,
+ getOverriddenSubmodules());
+ return PP.AllocateDefMacroDirective(MI, ImportLoc, SubModID,
+ getOverriddenSubmodules());
}
};
@@ -1772,7 +1806,8 @@ void ASTReader::resolvePendingMacro(IdentifierInfo *II,
const PendingMacroInfo &PMInfo) {
assert(II);
- if (PMInfo.M->Kind != MK_Module) {
+ if (PMInfo.M->Kind != MK_ImplicitModule &&
+ PMInfo.M->Kind != MK_ExplicitModule) {
installPCHMacroDirectives(II, *PMInfo.M,
PMInfo.PCHMacroData.MacroDirectivesOffset);
return;
@@ -1790,13 +1825,13 @@ void ASTReader::resolvePendingMacro(IdentifierInfo *II,
// install if we make this module visible.
HiddenNamesMap[Owner].HiddenMacros.insert(std::make_pair(II, MMI));
} else {
- installImportedMacro(II, MMI, Owner, /*FromFinalization*/false);
+ installImportedMacro(II, MMI, Owner);
}
}
void ASTReader::installPCHMacroDirectives(IdentifierInfo *II,
ModuleFile &M, uint64_t Offset) {
- assert(M.Kind != MK_Module);
+ assert(M.Kind != MK_ImplicitModule && M.Kind != MK_ExplicitModule);
BitstreamCursor &Cursor = M.MacroCursor;
SavedStreamPosition SavedPosition(Cursor);
@@ -1828,23 +1863,36 @@ void ASTReader::installPCHMacroDirectives(IdentifierInfo *II,
case MacroDirective::MD_Define: {
GlobalMacroID GMacID = getGlobalMacroID(M, Record[Idx++]);
MacroInfo *MI = getMacro(GMacID);
- bool isImported = Record[Idx++];
- bool isAmbiguous = Record[Idx++];
+ SubmoduleID ImportedFrom = Record[Idx++];
+ bool IsAmbiguous = Record[Idx++];
+ llvm::SmallVector<unsigned, 4> Overrides;
+ if (ImportedFrom) {
+ Overrides.insert(Overrides.end(),
+ &Record[Idx] + 1, &Record[Idx] + 1 + Record[Idx]);
+ Idx += Overrides.size() + 1;
+ }
DefMacroDirective *DefMD =
- PP.AllocateDefMacroDirective(MI, Loc, isImported);
- DefMD->setAmbiguous(isAmbiguous);
+ PP.AllocateDefMacroDirective(MI, Loc, ImportedFrom, Overrides);
+ DefMD->setAmbiguous(IsAmbiguous);
MD = DefMD;
break;
}
- case MacroDirective::MD_Undefine:
- MD = PP.AllocateUndefMacroDirective(Loc);
+ case MacroDirective::MD_Undefine: {
+ SubmoduleID ImportedFrom = Record[Idx++];
+ llvm::SmallVector<unsigned, 4> Overrides;
+ if (ImportedFrom) {
+ Overrides.insert(Overrides.end(),
+ &Record[Idx] + 1, &Record[Idx] + 1 + Record[Idx]);
+ Idx += Overrides.size() + 1;
+ }
+ MD = PP.AllocateUndefMacroDirective(Loc, ImportedFrom, Overrides);
break;
- case MacroDirective::MD_Visibility: {
+ }
+ case MacroDirective::MD_Visibility:
bool isPublic = Record[Idx++];
MD = PP.AllocateVisibilityMacroDirective(Loc, isPublic);
break;
}
- }
if (!Latest)
Latest = MD;
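A worked example of the length-prefixed override lists read above (values invented):

    // Record slice at Idx:       { 3, 7, 9, 12 }
    // Record[Idx]           ->   3 overridden submodules follow
    // Record[Idx+1..Idx+3]  ->   submodule IDs 7, 9 and 12 go into Overrides
    // Idx += Overrides.size() + 1, so Idx advances past all four entries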
@@ -1877,19 +1925,27 @@ static bool areDefinedInSystemModules(MacroInfo *PrevMI, MacroInfo *NewMI,
}
void ASTReader::removeOverriddenMacros(IdentifierInfo *II,
+ SourceLocation ImportLoc,
AmbiguousMacros &Ambig,
ArrayRef<SubmoduleID> Overrides) {
for (unsigned OI = 0, ON = Overrides.size(); OI != ON; ++OI) {
SubmoduleID OwnerID = Overrides[OI];
// If this macro is not yet visible, remove it from the hidden names list.
+ // It won't be there if we're in the middle of making the owner visible.
Module *Owner = getSubmodule(OwnerID);
- HiddenNames &Hidden = HiddenNamesMap[Owner];
- HiddenMacrosMap::iterator HI = Hidden.HiddenMacros.find(II);
- if (HI != Hidden.HiddenMacros.end()) {
- auto SubOverrides = HI->second->getOverriddenSubmodules();
- Hidden.HiddenMacros.erase(HI);
- removeOverriddenMacros(II, Ambig, SubOverrides);
+ auto HiddenIt = HiddenNamesMap.find(Owner);
+ if (HiddenIt != HiddenNamesMap.end()) {
+ HiddenNames &Hidden = HiddenIt->second;
+ HiddenMacrosMap::iterator HI = Hidden.HiddenMacros.find(II);
+ if (HI != Hidden.HiddenMacros.end()) {
+ // Register the macro now so we don't lose it when we re-export.
+ PP.appendMacroDirective(II, HI->second->import(PP, ImportLoc));
+
+ auto SubOverrides = HI->second->getOverriddenSubmodules();
+ Hidden.HiddenMacros.erase(HI);
+ removeOverriddenMacros(II, ImportLoc, Ambig, SubOverrides);
+ }
}
// If this macro is already in our list of conflicts, remove it from there.
@@ -1903,6 +1959,7 @@ void ASTReader::removeOverriddenMacros(IdentifierInfo *II,
ASTReader::AmbiguousMacros *
ASTReader::removeOverriddenMacros(IdentifierInfo *II,
+ SourceLocation ImportLoc,
ArrayRef<SubmoduleID> Overrides) {
MacroDirective *Prev = PP.getMacroDirective(II);
if (!Prev && Overrides.empty())
@@ -1915,7 +1972,7 @@ ASTReader::removeOverriddenMacros(IdentifierInfo *II,
AmbiguousMacros &Ambig = AmbiguousMacroDefs[II];
Ambig.push_back(PrevDef);
- removeOverriddenMacros(II, Ambig, Overrides);
+ removeOverriddenMacros(II, ImportLoc, Ambig, Overrides);
if (!Ambig.empty())
return &Ambig;
@@ -1927,7 +1984,7 @@ ASTReader::removeOverriddenMacros(IdentifierInfo *II,
if (PrevDef)
Ambig.push_back(PrevDef);
- removeOverriddenMacros(II, Ambig, Overrides);
+ removeOverriddenMacros(II, ImportLoc, Ambig, Overrides);
if (!Ambig.empty()) {
AmbiguousMacros &Result = AmbiguousMacroDefs[II];
@@ -1941,11 +1998,11 @@ ASTReader::removeOverriddenMacros(IdentifierInfo *II,
}
void ASTReader::installImportedMacro(IdentifierInfo *II, ModuleMacroInfo *MMI,
- Module *Owner, bool FromFinalization) {
+ Module *Owner) {
assert(II && Owner);
SourceLocation ImportLoc = Owner->MacroVisibilityLoc;
- if (ImportLoc.isInvalid() && !FromFinalization) {
+ if (ImportLoc.isInvalid()) {
// FIXME: If we made macros from this module visible but didn't provide a
// source location for the import, we don't have a location for the macro.
// Use the location at which the containing module file was first imported
@@ -1955,18 +2012,16 @@ void ASTReader::installImportedMacro(IdentifierInfo *II, ModuleMacroInfo *MMI,
}
AmbiguousMacros *Prev =
- removeOverriddenMacros(II, MMI->getOverriddenSubmodules());
+ removeOverriddenMacros(II, ImportLoc, MMI->getOverriddenSubmodules());
// Create a synthetic macro definition corresponding to the import (or null
// if this was an undefinition of the macro).
- DefMacroDirective *MD = MMI->import(PP, ImportLoc);
+ MacroDirective *Imported = MMI->import(PP, ImportLoc);
+ DefMacroDirective *MD = dyn_cast<DefMacroDirective>(Imported);
// If there's no ambiguity, just install the macro.
if (!Prev) {
- if (MD)
- PP.appendMacroDirective(II, MD);
- else
- PP.appendMacroDirective(II, PP.AllocateUndefMacroDirective(ImportLoc));
+ PP.appendMacroDirective(II, Imported);
return;
}
assert(!Prev->empty());
@@ -1974,10 +2029,14 @@ void ASTReader::installImportedMacro(IdentifierInfo *II, ModuleMacroInfo *MMI,
if (!MD) {
// We imported a #undef that didn't remove all prior definitions. The most
// recent prior definition remains, and we install it in the place of the
- // imported directive.
+ // imported directive, as if by a local #pragma pop_macro.
MacroInfo *NewMI = Prev->back()->getInfo();
Prev->pop_back();
- MD = PP.AllocateDefMacroDirective(NewMI, ImportLoc, /*Imported*/true);
+ MD = PP.AllocateDefMacroDirective(NewMI, ImportLoc);
+
+ // Install our #undef first so that we don't lose track of it. We'll replace
+ // this with whichever macro definition ends up winning.
+ PP.appendMacroDirective(II, Imported);
}
// We're introducing a macro definition that creates or adds to an ambiguity.
@@ -2035,14 +2094,14 @@ ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
off_t StoredSize;
time_t StoredTime;
bool Overridden;
-
+
assert(Record[0] == ID && "Bogus stored ID or offset");
StoredSize = static_cast<off_t>(Record[1]);
StoredTime = static_cast<time_t>(Record[2]);
Overridden = static_cast<bool>(Record[3]);
Filename = Blob;
- MaybeAddSystemRootToFilename(F, Filename);
-
+ ResolveImportedPath(F, Filename);
+
InputFileInfo R = { std::move(Filename), StoredSize, StoredTime, Overridden };
return R;
}
@@ -2128,12 +2187,23 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
bool IsOutOfDate = false;
// For an overridden file, there is nothing to validate.
- if (!Overridden && (StoredSize != File->getSize()
-#if !defined(LLVM_ON_WIN32)
+ if (!Overridden && //
+ (StoredSize != File->getSize() ||
+#if defined(LLVM_ON_WIN32)
+ false
+#else
// In our regression testing, the Windows file system seems to
// have inconsistent modification times that sometimes
// erroneously trigger this error-handling path.
- || StoredTime != File->getModificationTime()
+ //
+ // This also happens in networked file systems, so disable this
+ // check if validation is disabled or if we have an explicitly
+ // built PCM file.
+ //
+ // FIXME: Should we also do this for PCH files? They could also
+ // reasonably get shared across a network during a distributed build.
+ (StoredTime != File->getModificationTime() && !DisableValidation &&
+ F.Kind != MK_ExplicitModule)
#endif
)) {
if (Complain) {
@@ -2169,46 +2239,22 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
return IF;
}
-const FileEntry *ASTReader::getFileEntry(StringRef filenameStrRef) {
- ModuleFile &M = ModuleMgr.getPrimaryModule();
- std::string Filename = filenameStrRef;
- MaybeAddSystemRootToFilename(M, Filename);
- const FileEntry *File = FileMgr.getFile(Filename);
- if (File == nullptr && !M.OriginalDir.empty() && !CurrentDir.empty() &&
- M.OriginalDir != CurrentDir) {
- std::string resolved = resolveFileRelativeToOriginalDir(Filename,
- M.OriginalDir,
- CurrentDir);
- if (!resolved.empty())
- File = FileMgr.getFile(resolved);
- }
-
- return File;
+/// \brief If we are loading a relocatable PCH or module file, and the filename
+/// is not an absolute path, add the system or module root to the beginning of
+/// the file name.
+void ASTReader::ResolveImportedPath(ModuleFile &M, std::string &Filename) {
+ // Resolve relative to the base directory, if we have one.
+ if (!M.BaseDirectory.empty())
+ return ResolveImportedPath(Filename, M.BaseDirectory);
}
-/// \brief If we are loading a relocatable PCH file, and the filename is
-/// not an absolute path, add the system root to the beginning of the file
-/// name.
-void ASTReader::MaybeAddSystemRootToFilename(ModuleFile &M,
- std::string &Filename) {
- // If this is not a relocatable PCH file, there's nothing to do.
- if (!M.RelocatablePCH)
- return;
-
+void ASTReader::ResolveImportedPath(std::string &Filename, StringRef Prefix) {
if (Filename.empty() || llvm::sys::path::is_absolute(Filename))
return;
- if (isysroot.empty()) {
- // If no system root was given, default to '/'
- Filename.insert(Filename.begin(), '/');
- return;
- }
-
- unsigned Length = isysroot.size();
- if (isysroot[Length - 1] != '/')
- Filename.insert(Filename.begin(), '/');
-
- Filename.insert(Filename.begin(), isysroot.begin(), isysroot.end());
+ SmallString<128> Buffer;
+ llvm::sys::path::append(Buffer, Prefix, Filename);
+ Filename.assign(Buffer.begin(), Buffer.end());
}
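A brief usage sketch of the resolution above (paths invented for illustration):

    SmallString<128> Buffer;
    llvm::sys::path::append(Buffer, "/build/modules/Foo", "include/foo.h");
    // Buffer == "/build/modules/Foo/include/foo.h"; an absolute or empty
    // Filename is returned unchanged by the early exit above.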
ASTReader::ASTReadResult
@@ -2223,8 +2269,16 @@ ASTReader::ReadControlBlock(ModuleFile &F,
return Failure;
}
+ // Should we allow the configuration of the module file to differ from the
+ // configuration of the current translation unit in a compatible way?
+ //
+ // FIXME: Allow this for files explicitly specified with -include-pch too.
+ bool AllowCompatibleConfigurationMismatch = F.Kind == MK_ExplicitModule;
+
// Read all of the records and blocks in the control block.
RecordData Record;
+ unsigned NumInputs = 0;
+ unsigned NumUserInputs = 0;
while (1) {
llvm::BitstreamEntry Entry = Stream.advance();
@@ -2237,15 +2291,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
const HeaderSearchOptions &HSOpts =
PP.getHeaderSearchInfo().getHeaderSearchOpts();
- // All user input files reside at the index range [0, Record[1]), and
- // system input files reside at [Record[1], Record[0]).
- // Record is the one from INPUT_FILE_OFFSETS.
- unsigned NumInputs = Record[0];
- unsigned NumUserInputs = Record[1];
-
- if (!DisableValidation &&
- (ValidateSystemInputs || !HSOpts.ModulesValidateOncePerBuildSession ||
- F.InputFilesValidationTimestamp <= HSOpts.BuildSessionTimestamp)) {
+ // All user input files reside at the index range [0, NumUserInputs), and
+ // system input files reside at [NumUserInputs, NumInputs).
+ if (!DisableValidation) {
bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
// If we are reading a module, we will create a verification timestamp,
@@ -2254,7 +2302,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
unsigned N = NumUserInputs;
if (ValidateSystemInputs ||
- (HSOpts.ModulesValidateOncePerBuildSession && F.Kind == MK_Module))
+ (HSOpts.ModulesValidateOncePerBuildSession &&
+ F.InputFilesValidationTimestamp <= HSOpts.BuildSessionTimestamp &&
+ F.Kind == MK_ImplicitModule))
N = NumInputs;
for (unsigned I = 0; I < N; ++I) {
@@ -2324,6 +2374,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
}
F.RelocatablePCH = Record[4];
+ // Relative paths in a relocatable PCH are relative to our sysroot.
+ if (F.RelocatablePCH)
+ F.BaseDirectory = isysroot.empty() ? "/" : isysroot;
const std::string &CurBranch = getClangFullRepositoryVersion();
StringRef ASTBranch = Blob;
@@ -2335,6 +2388,11 @@ ASTReader::ReadControlBlock(ModuleFile &F,
break;
}
+ case SIGNATURE:
+ assert((!F.Signature || F.Signature == Record[0]) && "signature changed");
+ F.Signature = Record[0];
+ break;
+
case IMPORTS: {
// Load each of the imported PCH files.
unsigned Idx = 0, N = Record.size();
@@ -2348,14 +2406,12 @@ ASTReader::ReadControlBlock(ModuleFile &F,
SourceLocation::getFromRawEncoding(Record[Idx++]);
off_t StoredSize = (off_t)Record[Idx++];
time_t StoredModTime = (time_t)Record[Idx++];
- unsigned Length = Record[Idx++];
- SmallString<128> ImportedFile(Record.begin() + Idx,
- Record.begin() + Idx + Length);
- Idx += Length;
+ ASTFileSignature StoredSignature = Record[Idx++];
+ auto ImportedFile = ReadPath(F, Record, Idx);
// Load the AST file.
switch(ReadASTCore(ImportedFile, ImportedKind, ImportLoc, &F, Loaded,
- StoredSize, StoredModTime,
+ StoredSize, StoredModTime, StoredSignature,
ClientLoadCapabilities)) {
case Failure: return Failure;
// If we have to ignore the dependency, we'll have to ignore this too.
@@ -2372,8 +2428,10 @@ ASTReader::ReadControlBlock(ModuleFile &F,
case LANGUAGE_OPTIONS: {
bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ // FIXME: The &F == *ModuleMgr.begin() check is wrong for modules.
if (Listener && &F == *ModuleMgr.begin() &&
- ParseLanguageOptions(Record, Complain, *Listener) &&
+ ParseLanguageOptions(Record, Complain, *Listener,
+ AllowCompatibleConfigurationMismatch) &&
!DisableValidation && !AllowConfigurationMismatch)
return ConfigurationMismatch;
break;
@@ -2391,6 +2449,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
case DIAGNOSTIC_OPTIONS: {
bool Complain = (ClientLoadCapabilities & ARR_OutOfDate)==0;
if (Listener && &F == *ModuleMgr.begin() &&
+ !AllowCompatibleConfigurationMismatch &&
ParseDiagnosticOptions(Record, Complain, *Listener) &&
!DisableValidation)
return OutOfDate;
@@ -2400,6 +2459,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
case FILE_SYSTEM_OPTIONS: {
bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0;
if (Listener && &F == *ModuleMgr.begin() &&
+ !AllowCompatibleConfigurationMismatch &&
ParseFileSystemOptions(Record, Complain, *Listener) &&
!DisableValidation && !AllowConfigurationMismatch)
return ConfigurationMismatch;
@@ -2409,6 +2469,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
case HEADER_SEARCH_OPTIONS: {
bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0;
if (Listener && &F == *ModuleMgr.begin() &&
+ !AllowCompatibleConfigurationMismatch &&
ParseHeaderSearchOptions(Record, Complain, *Listener) &&
!DisableValidation && !AllowConfigurationMismatch)
return ConfigurationMismatch;
@@ -2418,6 +2479,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
case PREPROCESSOR_OPTIONS: {
bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0;
if (Listener && &F == *ModuleMgr.begin() &&
+ !AllowCompatibleConfigurationMismatch &&
ParsePreprocessorOptions(Record, Complain, *Listener,
SuggestedPredefines) &&
!DisableValidation && !AllowConfigurationMismatch)
@@ -2429,7 +2491,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
F.OriginalSourceFileID = FileID::get(Record[0]);
F.ActualOriginalSourceFileName = Blob;
F.OriginalSourceFileName = F.ActualOriginalSourceFileName;
- MaybeAddSystemRootToFilename(F, F.OriginalSourceFileName);
+ ResolveImportedPath(F, F.OriginalSourceFileName);
break;
case ORIGINAL_FILE_ID:
@@ -2446,46 +2508,43 @@ ASTReader::ReadControlBlock(ModuleFile &F,
Listener->ReadModuleName(F.ModuleName);
break;
- case MODULE_MAP_FILE:
- F.ModuleMapPath = Blob;
-
- // Try to resolve ModuleName in the current header search context and
- // verify that it is found in the same module map file as we saved. If the
- // top-level AST file is a main file, skip this check because there is no
- // usable header search context.
+ case MODULE_DIRECTORY: {
assert(!F.ModuleName.empty() &&
- "MODULE_NAME should come before MOUDLE_MAP_FILE");
- if (F.Kind == MK_Module &&
- (*ModuleMgr.begin())->Kind != MK_MainFile) {
- Module *M = PP.getHeaderSearchInfo().lookupModule(F.ModuleName);
- if (!M) {
- assert(ImportedBy && "top-level import should be verified");
- if ((ClientLoadCapabilities & ARR_Missing) == 0)
- Diag(diag::err_imported_module_not_found)
- << F.ModuleName << ImportedBy->FileName;
- return Missing;
- }
-
- const FileEntry *StoredModMap = FileMgr.getFile(F.ModuleMapPath);
- if (StoredModMap == nullptr || StoredModMap != M->ModuleMap) {
- assert(M->ModuleMap && "found module is missing module map file");
- assert(M->Name == F.ModuleName && "found module with different name");
- assert(ImportedBy && "top-level import should be verified");
- if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
- Diag(diag::err_imported_module_modmap_changed)
- << F.ModuleName << ImportedBy->FileName
- << M->ModuleMap->getName() << F.ModuleMapPath;
- return OutOfDate;
+ "MODULE_DIRECTORY found before MODULE_NAME");
+ // If we've already loaded a module map file covering this module, we may
+ // have a better path for it (relative to the current build).
+ Module *M = PP.getHeaderSearchInfo().lookupModule(F.ModuleName);
+ if (M && M->Directory) {
+ // If we're implicitly loading a module, the base directory can't
+ // change between the build and use.
+ if (F.Kind != MK_ExplicitModule) {
+ const DirectoryEntry *BuildDir =
+ PP.getFileManager().getDirectory(Blob);
+ if (!BuildDir || BuildDir != M->Directory) {
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Diag(diag::err_imported_module_relocated)
+ << F.ModuleName << Blob << M->Directory->getName();
+ return OutOfDate;
+ }
}
+ F.BaseDirectory = M->Directory->getName();
+ } else {
+ F.BaseDirectory = Blob;
}
+ break;
+ }
- if (Listener)
- Listener->ReadModuleMapFile(F.ModuleMapPath);
+ case MODULE_MAP_FILE:
+ if (ASTReadResult Result =
+ ReadModuleMapFileBlock(Record, F, ImportedBy, ClientLoadCapabilities))
+ return Result;
break;
case INPUT_FILE_OFFSETS:
+ NumInputs = Record[0];
+ NumUserInputs = Record[1];
F.InputFileOffsets = (const uint32_t *)Blob.data();
- F.InputFilesLoaded.resize(Record[0]);
+ F.InputFilesLoaded.resize(NumInputs);
break;
}
}
@@ -2628,7 +2687,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.TypeRemap.insertOrReplace(
std::make_pair(LocalBaseTypeIndex,
F.BaseTypeIndex - LocalBaseTypeIndex));
-
+
TypesLoaded.resize(TypesLoaded.size() + F.LocalNumTypes);
}
break;
@@ -2658,7 +2717,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// Introduce the global -> local mapping for declarations within this
// module.
F.GlobalToLocalDeclIDs[&F] = LocalBaseDeclID;
-
+
DeclsLoaded.resize(DeclsLoaded.size() + F.LocalNumDecls);
}
break;
@@ -2687,7 +2746,6 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
auto *DC = cast<DeclContext>(D);
DC->getPrimaryContext()->setHasExternalVisibleStorage(true);
auto *&LookupTable = F.DeclContextInfos[DC].NameLookupTableData;
- // FIXME: There should never be an existing lookup table.
delete LookupTable;
LookupTable = Table;
} else
@@ -2729,8 +2787,8 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.IdentifierRemap.insertOrReplace(
std::make_pair(LocalBaseIdentifierID,
F.BaseIdentifierID - LocalBaseIdentifierID));
-
- IdentifiersLoaded.resize(IdentifiersLoaded.size()
+
+ IdentifiersLoaded.resize(IdentifiersLoaded.size()
+ F.LocalNumIdentifiers);
}
break;
@@ -2823,7 +2881,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
std::make_pair(LocalBaseSelectorID,
F.BaseSelectorID - LocalBaseSelectorID));
- SelectorsLoaded.resize(SelectorsLoaded.size() + F.LocalNumSelectors);
+ SelectorsLoaded.resize(SelectorsLoaded.size() + F.LocalNumSelectors);
}
break;
}
@@ -2904,19 +2962,16 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
// Continuous range maps we may be updating in our module.
- ContinuousRangeMap<uint32_t, int, 2>::Builder SLocRemap(F.SLocRemap);
- ContinuousRangeMap<uint32_t, int, 2>::Builder
- IdentifierRemap(F.IdentifierRemap);
- ContinuousRangeMap<uint32_t, int, 2>::Builder
- MacroRemap(F.MacroRemap);
- ContinuousRangeMap<uint32_t, int, 2>::Builder
- PreprocessedEntityRemap(F.PreprocessedEntityRemap);
- ContinuousRangeMap<uint32_t, int, 2>::Builder
- SubmoduleRemap(F.SubmoduleRemap);
- ContinuousRangeMap<uint32_t, int, 2>::Builder
- SelectorRemap(F.SelectorRemap);
- ContinuousRangeMap<uint32_t, int, 2>::Builder DeclRemap(F.DeclRemap);
- ContinuousRangeMap<uint32_t, int, 2>::Builder TypeRemap(F.TypeRemap);
+ typedef ContinuousRangeMap<uint32_t, int, 2>::Builder
+ RemapBuilder;
+ RemapBuilder SLocRemap(F.SLocRemap);
+ RemapBuilder IdentifierRemap(F.IdentifierRemap);
+ RemapBuilder MacroRemap(F.MacroRemap);
+ RemapBuilder PreprocessedEntityRemap(F.PreprocessedEntityRemap);
+ RemapBuilder SubmoduleRemap(F.SubmoduleRemap);
+ RemapBuilder SelectorRemap(F.SelectorRemap);
+ RemapBuilder DeclRemap(F.DeclRemap);
+ RemapBuilder TypeRemap(F.TypeRemap);
while(Data < DataEnd) {
using namespace llvm::support;
@@ -2946,26 +3001,23 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
uint32_t TypeIndexOffset =
endian::readNext<uint32_t, little, unaligned>(Data);
- // Source location offset is mapped to OM->SLocEntryBaseOffset.
- SLocRemap.insert(std::make_pair(SLocOffset,
- static_cast<int>(OM->SLocEntryBaseOffset - SLocOffset)));
- IdentifierRemap.insert(
- std::make_pair(IdentifierIDOffset,
- OM->BaseIdentifierID - IdentifierIDOffset));
- MacroRemap.insert(std::make_pair(MacroIDOffset,
- OM->BaseMacroID - MacroIDOffset));
- PreprocessedEntityRemap.insert(
- std::make_pair(PreprocessedEntityIDOffset,
- OM->BasePreprocessedEntityID - PreprocessedEntityIDOffset));
- SubmoduleRemap.insert(std::make_pair(SubmoduleIDOffset,
- OM->BaseSubmoduleID - SubmoduleIDOffset));
- SelectorRemap.insert(std::make_pair(SelectorIDOffset,
- OM->BaseSelectorID - SelectorIDOffset));
- DeclRemap.insert(std::make_pair(DeclIDOffset,
- OM->BaseDeclID - DeclIDOffset));
-
- TypeRemap.insert(std::make_pair(TypeIndexOffset,
- OM->BaseTypeIndex - TypeIndexOffset));
+ uint32_t None = std::numeric_limits<uint32_t>::max();
+
+ auto mapOffset = [&](uint32_t Offset, uint32_t BaseOffset,
+ RemapBuilder &Remap) {
+ if (Offset != None)
+ Remap.insert(std::make_pair(Offset,
+ static_cast<int>(BaseOffset - Offset)));
+ };
+ mapOffset(SLocOffset, OM->SLocEntryBaseOffset, SLocRemap);
+ mapOffset(IdentifierIDOffset, OM->BaseIdentifierID, IdentifierRemap);
+ mapOffset(MacroIDOffset, OM->BaseMacroID, MacroRemap);
+ mapOffset(PreprocessedEntityIDOffset, OM->BasePreprocessedEntityID,
+ PreprocessedEntityRemap);
+ mapOffset(SubmoduleIDOffset, OM->BaseSubmoduleID, SubmoduleRemap);
+ mapOffset(SelectorIDOffset, OM->BaseSelectorID, SelectorRemap);
+ mapOffset(DeclIDOffset, OM->BaseDeclID, DeclRemap);
+ mapOffset(TypeIndexOffset, OM->BaseTypeIndex, TypeRemap);
// Global -> local mappings.
F.GlobalToLocalDeclIDs[OM] = DeclIDOffset;
@@ -3206,7 +3258,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case IMPORTED_MODULES: {
- if (F.Kind != MK_Module) {
+ if (F.Kind != MK_ImplicitModule && F.Kind != MK_ExplicitModule) {
// If we aren't loading a module (which has its own exports), make
// all of the imported modules visible.
// FIXME: Deal with macros-only imports.
@@ -3287,10 +3339,110 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
OptimizeOffPragmaLocation = ReadSourceLocation(F, Record[0]);
break;
+
+ case UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ UnusedLocalTypedefNameCandidates.push_back(
+ getGlobalDeclID(F, Record[I]));
+ break;
}
}
}
+ASTReader::ASTReadResult
+ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
+ const ModuleFile *ImportedBy,
+ unsigned ClientLoadCapabilities) {
+ unsigned Idx = 0;
+ F.ModuleMapPath = ReadPath(F, Record, Idx);
+
+ if (F.Kind == MK_ExplicitModule) {
+ // For an explicitly-loaded module, we don't care whether the original
+ // module map file exists or matches.
+ return Success;
+ }
+
+ // Try to resolve ModuleName in the current header search context and
+ // verify that it is found in the same module map file as we saved. If the
+ // top-level AST file is a main file, skip this check because there is no
+ // usable header search context.
+ assert(!F.ModuleName.empty() &&
+ "MODULE_NAME should come before MODULE_MAP_FILE");
+ if (F.Kind == MK_ImplicitModule &&
+ (*ModuleMgr.begin())->Kind != MK_MainFile) {
+ // An implicitly-loaded module file should have its module listed in some
+ // module map file that we've already loaded.
+ Module *M = PP.getHeaderSearchInfo().lookupModule(F.ModuleName);
+ auto &Map = PP.getHeaderSearchInfo().getModuleMap();
+ const FileEntry *ModMap = M ? Map.getModuleMapFileForUniquing(M) : nullptr;
+ if (!ModMap) {
+ assert(ImportedBy && "top-level import should be verified");
+ if ((ClientLoadCapabilities & ARR_Missing) == 0)
+ Diag(diag::err_imported_module_not_found) << F.ModuleName << F.FileName
+ << ImportedBy->FileName
+ << F.ModuleMapPath;
+ return Missing;
+ }
+
+ assert(M->Name == F.ModuleName && "found module with different name");
+
+ // Check the primary module map file.
+ const FileEntry *StoredModMap = FileMgr.getFile(F.ModuleMapPath);
+ if (StoredModMap == nullptr || StoredModMap != ModMap) {
+ assert(ModMap && "found module is missing module map file");
+ assert(ImportedBy && "top-level import should be verified");
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Diag(diag::err_imported_module_modmap_changed)
+ << F.ModuleName << ImportedBy->FileName
+ << ModMap->getName() << F.ModuleMapPath;
+ return OutOfDate;
+ }
+
+ llvm::SmallPtrSet<const FileEntry *, 1> AdditionalStoredMaps;
+ for (unsigned I = 0, N = Record[Idx++]; I < N; ++I) {
+ // FIXME: we should use input files rather than storing names.
+ std::string Filename = ReadPath(F, Record, Idx);
+ const FileEntry *F =
+ FileMgr.getFile(Filename, false, false);
+ if (F == nullptr) {
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Error("could not find file '" + Filename +"' referenced by AST file");
+ return OutOfDate;
+ }
+ AdditionalStoredMaps.insert(F);
+ }
+
+ // Check any additional module map files (e.g. module.private.modulemap)
+ // that are not in the pcm.
+ if (auto *AdditionalModuleMaps = Map.getAdditionalModuleMapFiles(M)) {
+ for (const FileEntry *ModMap : *AdditionalModuleMaps) {
+ // Remove the files that match.
+ // Note: SmallPtrSet::erase is really a "remove if present" operation and
+ // returns whether it removed anything.
+ if (!AdditionalStoredMaps.erase(ModMap)) {
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Diag(diag::err_module_different_modmap)
+ << F.ModuleName << /*new*/0 << ModMap->getName();
+ return OutOfDate;
+ }
+ }
+ }
+
+ // Check any additional module map files that are in the pcm, but not
+ // found in header search. Cases that match are already removed.
+ for (const FileEntry *ModMap : AdditionalStoredMaps) {
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Diag(diag::err_module_different_modmap)
+ << F.ModuleName << /*not new*/1 << ModMap->getName();
+ return OutOfDate;
+ }
+ }
+
+ if (Listener)
+ Listener->ReadModuleMapFile(F.ModuleMapPath);
+ return Success;
+}
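An illustrative scenario for the extra module map checks above (file names invented): if Foo.pcm was built when only Foo/module.modulemap existed and a Foo/module.private.modulemap has since been added, the new map file is not in the set recorded in the pcm, AdditionalStoredMaps.erase returns false, and err_module_different_modmap is emitted with the /*new*/0 form, marking the module file out of date; a map file recorded in the pcm but no longer found by header search takes the /*not new*/1 form instead.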
+
+
/// \brief Move the given method to the back of the global list of methods.
static void moveMethodToBackOfGlobalList(Sema &S, ObjCMethodDecl *Method) {
// Find the entry for this selector in the method pool.
@@ -3305,7 +3457,7 @@ static void moveMethodToBackOfGlobalList(Sema &S, ObjCMethodDecl *Method) {
bool Found = false;
for (ObjCMethodList *List = &Start; List; List = List->getNext()) {
if (!Found) {
- if (List->Method == Method) {
+ if (List->getMethod() == Method) {
Found = true;
} else {
// Keep searching.
@@ -3314,9 +3466,9 @@ static void moveMethodToBackOfGlobalList(Sema &S, ObjCMethodDecl *Method) {
}
if (List->getNext())
- List->Method = List->getNext()->Method;
+ List->setMethod(List->getNext()->getMethod());
else
- List->Method = Method;
+ List->setMethod(Method);
}
}
@@ -3336,8 +3488,13 @@ void ASTReader::makeNamesVisible(const HiddenNames &Names, Module *Owner,
assert((FromFinalization || Owner->NameVisibility >= Module::MacrosVisible) &&
"nothing to make visible?");
- for (const auto &Macro : Names.HiddenMacros)
- installImportedMacro(Macro.first, Macro.second, Owner, FromFinalization);
+ for (const auto &Macro : Names.HiddenMacros) {
+ if (FromFinalization)
+ PP.appendMacroDirective(Macro.first,
+ Macro.second->import(PP, SourceLocation()));
+ else
+ installImportedMacro(Macro.first, Macro.second, Owner);
+ }
}
void ASTReader::makeModuleVisible(Module *Mod,
@@ -3385,7 +3542,7 @@ void ASTReader::makeModuleVisible(Module *Mod,
for (SmallVectorImpl<Module *>::iterator
I = Exports.begin(), E = Exports.end(); I != E; ++I) {
Module *Exported = *I;
- if (Visited.insert(Exported))
+ if (Visited.insert(Exported).second)
Stack.push_back(Exported);
}
@@ -3435,10 +3592,9 @@ bool ASTReader::isGlobalIndexUnavailable() const {
static void updateModuleTimestamp(ModuleFile &MF) {
// Overwrite the timestamp file contents so that file's mtime changes.
std::string TimestampFilename = MF.getTimestampFilename();
- std::string ErrorInfo;
- llvm::raw_fd_ostream OS(TimestampFilename.c_str(), ErrorInfo,
- llvm::sys::fs::F_Text);
- if (!ErrorInfo.empty())
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(TimestampFilename, EC, llvm::sys::fs::F_Text);
+ if (EC)
return;
OS << "Timestamp file\n";
}
@@ -3460,7 +3616,7 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
SmallVector<ImportedModule, 4> Loaded;
switch(ASTReadResult ReadResult = ReadASTCore(FileName, Type, ImportLoc,
/*ImportedBy=*/nullptr, Loaded,
- 0, 0,
+ 0, 0, 0,
ClientLoadCapabilities)) {
case Failure:
case Missing:
@@ -3618,7 +3774,7 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
// in the filesystem).
for (unsigned I = 0, N = Loaded.size(); I != N; ++I) {
ImportedModule &M = Loaded[I];
- if (M.Mod->Kind == MK_Module) {
+ if (M.Mod->Kind == MK_ImplicitModule) {
updateModuleTimestamp(*M.Mod);
}
}
@@ -3627,6 +3783,8 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
return Success;
}
+static ASTFileSignature readASTFileSignature(llvm::BitstreamReader &StreamFile);
+
ASTReader::ASTReadResult
ASTReader::ReadASTCore(StringRef FileName,
ModuleKind Type,
@@ -3634,12 +3792,14 @@ ASTReader::ReadASTCore(StringRef FileName,
ModuleFile *ImportedBy,
SmallVectorImpl<ImportedModule> &Loaded,
off_t ExpectedSize, time_t ExpectedModTime,
+ ASTFileSignature ExpectedSignature,
unsigned ClientLoadCapabilities) {
ModuleFile *M;
std::string ErrorStr;
ModuleManager::AddModuleResult AddResult
= ModuleMgr.addModule(FileName, Type, ImportLoc, ImportedBy,
getGeneration(), ExpectedSize, ExpectedModTime,
+ ExpectedSignature, readASTFileSignature,
M, ErrorStr);
switch (AddResult) {
@@ -3651,7 +3811,7 @@ ASTReader::ReadASTCore(StringRef FileName,
break;
case ModuleManager::Missing:
- // The module file was missing; if the client handle handle, that, return
+ // The module file was missing; if the client can handle that, return
// it.
if (ClientLoadCapabilities & ARR_Missing)
return Missing;
@@ -3690,7 +3850,7 @@ ASTReader::ReadASTCore(StringRef FileName,
ModuleFile &F = *M;
BitstreamCursor &Stream = F.Stream;
- Stream.init(F.StreamFile);
+ Stream.init(&F.StreamFile);
F.SizeInBits = F.Buffer->getBufferSize() * 8;
// Sniff for the signature.
@@ -3937,6 +4097,34 @@ static bool SkipCursorToBlock(BitstreamCursor &Cursor, unsigned BlockID) {
}
}
+static ASTFileSignature readASTFileSignature(llvm::BitstreamReader &StreamFile){
+ BitstreamCursor Stream(StreamFile);
+ if (Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'P' ||
+ Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'H') {
+ return 0;
+ }
+
+ // Scan for the CONTROL_BLOCK_ID block.
+ if (SkipCursorToBlock(Stream, CONTROL_BLOCK_ID))
+ return 0;
+
+ // Scan for SIGNATURE inside the control block.
+ ASTReader::RecordData Record;
+ while (1) {
+ llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+ if (Entry.Kind == llvm::BitstreamEntry::EndBlock ||
+ Entry.Kind != llvm::BitstreamEntry::Record)
+ return 0;
+
+ Record.clear();
+ StringRef Blob;
+ if (SIGNATURE == Stream.readRecord(Entry.ID, Record, &Blob))
+ return Record[0];
+ }
+}
+
/// \brief Retrieve the name of the original source file name
/// directly from the AST file, without actually loading the AST
/// file.
@@ -3944,20 +4132,18 @@ std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName,
FileManager &FileMgr,
DiagnosticsEngine &Diags) {
// Open the AST file.
- std::string ErrStr;
- std::unique_ptr<llvm::MemoryBuffer> Buffer;
- Buffer.reset(FileMgr.getBufferForFile(ASTFileName, &ErrStr));
+ auto Buffer = FileMgr.getBufferForFile(ASTFileName);
if (!Buffer) {
- Diags.Report(diag::err_fe_unable_to_read_pch_file) << ASTFileName << ErrStr;
+ Diags.Report(diag::err_fe_unable_to_read_pch_file)
+ << ASTFileName << Buffer.getError().message();
return std::string();
}
// Initialize the stream
llvm::BitstreamReader StreamFile;
- BitstreamCursor Stream;
- StreamFile.init((const unsigned char *)Buffer->getBufferStart(),
- (const unsigned char *)Buffer->getBufferEnd());
- Stream.init(StreamFile);
+ StreamFile.init((const unsigned char *)(*Buffer)->getBufferStart(),
+ (const unsigned char *)(*Buffer)->getBufferEnd());
+ BitstreamCursor Stream(StreamFile);
// Sniff for the signature.
if (Stream.Read(8) != 'C' ||
@@ -4012,9 +4198,10 @@ namespace {
{
}
- bool ReadLanguageOptions(const LangOptions &LangOpts,
- bool Complain) override {
- return checkLanguageOptions(ExistingLangOpts, LangOpts, nullptr);
+ bool ReadLanguageOptions(const LangOptions &LangOpts, bool Complain,
+ bool AllowCompatibleDifferences) override {
+ return checkLanguageOptions(ExistingLangOpts, LangOpts, nullptr,
+ AllowCompatibleDifferences);
}
bool ReadTargetOptions(const TargetOptions &TargetOpts,
bool Complain) override {
@@ -4033,19 +4220,16 @@ bool ASTReader::readASTFileControlBlock(StringRef Filename,
FileManager &FileMgr,
ASTReaderListener &Listener) {
// Open the AST file.
- std::string ErrStr;
- std::unique_ptr<llvm::MemoryBuffer> Buffer;
- Buffer.reset(FileMgr.getBufferForFile(Filename, &ErrStr));
+ auto Buffer = FileMgr.getBufferForFile(Filename);
if (!Buffer) {
return true;
}
// Initialize the stream
llvm::BitstreamReader StreamFile;
- BitstreamCursor Stream;
- StreamFile.init((const unsigned char *)Buffer->getBufferStart(),
- (const unsigned char *)Buffer->getBufferEnd());
- Stream.init(StreamFile);
+ StreamFile.init((const unsigned char *)(*Buffer)->getBufferStart(),
+ (const unsigned char *)(*Buffer)->getBufferEnd());
+ BitstreamCursor Stream(StreamFile);
// Sniff for the signature.
if (Stream.Read(8) != 'C' ||
@@ -4061,6 +4245,7 @@ bool ASTReader::readASTFileControlBlock(StringRef Filename,
bool NeedsInputFiles = Listener.needsInputFileVisitation();
bool NeedsSystemInputFiles = Listener.needsSystemInputFileVisitation();
+ bool NeedsImports = Listener.needsImportVisitation();
BitstreamCursor InputFilesCursor;
if (NeedsInputFiles) {
InputFilesCursor = Stream;
@@ -4083,6 +4268,7 @@ bool ASTReader::readASTFileControlBlock(StringRef Filename,
// Scan for ORIGINAL_FILE inside the control block.
RecordData Record;
+ std::string ModuleDir;
while (1) {
llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
if (Entry.Kind == llvm::BitstreamEntry::EndBlock)
@@ -4107,11 +4293,19 @@ bool ASTReader::readASTFileControlBlock(StringRef Filename,
case MODULE_NAME:
Listener.ReadModuleName(Blob);
break;
- case MODULE_MAP_FILE:
- Listener.ReadModuleMapFile(Blob);
+ case MODULE_DIRECTORY:
+ ModuleDir = Blob;
break;
+ case MODULE_MAP_FILE: {
+ unsigned Idx = 0;
+ auto Path = ReadString(Record, Idx);
+ ResolveImportedPath(Path, ModuleDir);
+ Listener.ReadModuleMapFile(Path);
+ break;
+ }
case LANGUAGE_OPTIONS:
- if (ParseLanguageOptions(Record, false, Listener))
+ if (ParseLanguageOptions(Record, false, Listener,
+ /*AllowCompatibleConfigurationMismatch*/false))
return true;
break;
@@ -4168,7 +4362,10 @@ bool ASTReader::readASTFileControlBlock(StringRef Filename,
switch ((InputFileRecordTypes)Cursor.readRecord(Code, Record, &Blob)) {
case INPUT_FILE:
bool Overridden = static_cast<bool>(Record[3]);
- shouldContinue = Listener.visitInputFile(Blob, isSystemFile, Overridden);
+ std::string Filename = Blob;
+ ResolveImportedPath(Filename, ModuleDir);
+ shouldContinue =
+ Listener.visitInputFile(Filename, isSystemFile, Overridden);
break;
}
if (!shouldContinue)
@@ -4177,6 +4374,21 @@ bool ASTReader::readASTFileControlBlock(StringRef Filename,
break;
}
+ case IMPORTS: {
+ if (!NeedsImports)
+ break;
+
+ unsigned Idx = 0, N = Record.size();
+ while (Idx < N) {
+ // Read information about the AST file.
+ Idx += 5; // ImportLoc, Size, ModTime, Signature
+ std::string Filename = ReadString(Record, Idx);
+ ResolveImportedPath(Filename, ModuleDir);
+ Listener.visitImport(Filename);
+ }
+ break;
+ }
+
default:
// No other validation to perform.
break;
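The IMPORTS case decodes one flat record in which every import contributes a few scalar fields followed by a length-prefixed path. A standalone sketch of that cursor-style walk over a toy record layout (two skipped scalars per entry here, purely for illustration, not the real import record format):

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

using Record = std::vector<uint64_t>;

// Reads a length-prefixed string: Record[Idx] is the length, the following
// entries are the characters. Advances Idx past what was consumed.
static std::string readString(const Record &R, unsigned &Idx) {
  unsigned Len = R[Idx++];
  std::string S;
  for (unsigned I = 0; I != Len; ++I)
    S += static_cast<char>(R[Idx++]);
  return S;
}

int main() {
  // Two entries, each: two scalar fields we skip, then a path string.
  Record R = {7, 42, 3, 'a', '.', 'h',
              9, 43, 3, 'b', '.', 'h'};
  for (unsigned Idx = 0, N = R.size(); Idx < N;) {
    Idx += 2;                                // skip the scalar fields
    std::cout << readString(R, Idx) << "\n"; // prints a.h, then b.h
  }
}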
@@ -4224,21 +4436,30 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// Read a record.
StringRef Blob;
Record.clear();
- switch (F.Stream.readRecord(Entry.ID, Record, &Blob)) {
+ auto Kind = F.Stream.readRecord(Entry.ID, Record, &Blob);
+
+ if ((Kind == SUBMODULE_METADATA) != First) {
+ Error("submodule metadata record should be at beginning of block");
+ return Failure;
+ }
+ First = false;
+
+ // Submodule information is only valid if we have a current module.
+ // FIXME: Should we error on these cases?
+ if (!CurrentModule && Kind != SUBMODULE_METADATA &&
+ Kind != SUBMODULE_DEFINITION)
+ continue;
+
+ switch (Kind) {
default: // Default behavior: ignore.
break;
-
- case SUBMODULE_DEFINITION: {
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
+ case SUBMODULE_DEFINITION: {
if (Record.size() < 8) {
Error("malformed module definition");
return Failure;
}
-
+
StringRef Name = Blob;
unsigned Idx = 0;
SubmoduleID GlobalID = getGlobalSubmoduleID(F, Record[Idx++]);
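The rewritten submodule loop above validates the record kind once, up front: the metadata record must come first, and most other records are skipped until a definition establishes a current module. A toy block processor with the same shape (the Kind values are hypothetical, not the real SUBMODULE_* codes):

#include <cstdio>
#include <vector>

enum Kind { METADATA, DEFINITION, HEADER, IMPORT };

// Enforces once that METADATA comes first and that everything except
// METADATA/DEFINITION needs a current definition, instead of repeating
// the same guards inside every switch case.
static bool processBlock(const std::vector<Kind> &Records) {
  bool First = true;
  bool HaveCurrent = false;
  for (Kind K : Records) {
    if ((K == METADATA) != First) {
      std::fprintf(stderr, "metadata record must be first in block\n");
      return false;
    }
    First = false;
    if (!HaveCurrent && K != METADATA && K != DEFINITION)
      continue;                    // nothing to attach this record to yet
    switch (K) {
    case METADATA:   break;
    case DEFINITION: HaveCurrent = true; break;
    case HEADER:     /* attach header to current definition */ break;
    case IMPORT:     /* record import on current definition */ break;
    }
  }
  return true;
}

int main() {
  return processBlock({METADATA, DEFINITION, HEADER, IMPORT}) ? 0 : 1;
}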
@@ -4253,20 +4474,17 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
bool ConfigMacrosExhaustive = Record[Idx++];
Module *ParentModule = nullptr;
- const FileEntry *ModuleMap = nullptr;
- if (Parent) {
+ if (Parent)
ParentModule = getSubmodule(Parent);
- ModuleMap = ParentModule->ModuleMap;
- }
-
- if (!F.ModuleMapPath.empty())
- ModuleMap = FileMgr.getFile(F.ModuleMapPath);
// Retrieve this (sub)module from the module map, creating it if
// necessary.
- CurrentModule = ModMap.findOrCreateModule(Name, ParentModule, ModuleMap,
- IsFramework,
+ CurrentModule = ModMap.findOrCreateModule(Name, ParentModule, IsFramework,
IsExplicit).first;
+
+ // FIXME: set the definition loc for CurrentModule, or call
+ // ModMap.setInferredModuleAllowedBy()
+
SubmoduleID GlobalIndex = GlobalID - NUM_PREDEF_SUBMODULE_IDS;
if (GlobalIndex >= SubmodulesLoaded.size() ||
SubmodulesLoaded[GlobalIndex]) {
@@ -4311,14 +4529,6 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
case SUBMODULE_UMBRELLA_HEADER: {
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
-
- if (!CurrentModule)
- break;
-
if (const FileEntry *Umbrella = PP.getFileManager().getFile(Blob)) {
if (!CurrentModule->getUmbrellaHeader())
ModMap.setUmbrellaHeader(CurrentModule, Umbrella);
@@ -4331,73 +4541,26 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
}
- case SUBMODULE_HEADER: {
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
-
- if (!CurrentModule)
- break;
-
- // We lazily associate headers with their modules via the HeaderInfoTable.
- // FIXME: Re-evaluate this section; maybe only store InputFile IDs instead
- // of complete filenames or remove it entirely.
- break;
- }
-
- case SUBMODULE_EXCLUDED_HEADER: {
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
-
- if (!CurrentModule)
- break;
-
- // We lazily associate headers with their modules via the HeaderInfoTable.
+ case SUBMODULE_HEADER:
+ case SUBMODULE_EXCLUDED_HEADER:
+ case SUBMODULE_PRIVATE_HEADER:
+ // We lazily associate headers with their modules via the HeaderInfo table.
// FIXME: Re-evaluate this section; maybe only store InputFile IDs instead
// of complete filenames or remove it entirely.
- break;
- }
-
- case SUBMODULE_PRIVATE_HEADER: {
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
+ break;
- if (!CurrentModule)
- break;
-
- // We lazily associate headers with their modules via the HeaderInfoTable.
- // FIXME: Re-evaluate this section; maybe only store InputFile IDs instead
- // of complete filenames or remove it entirely.
- break;
- }
+ case SUBMODULE_TEXTUAL_HEADER:
+ case SUBMODULE_PRIVATE_TEXTUAL_HEADER:
+ // FIXME: Textual headers are not marked in the HeaderInfo table. Load
+ // them here.
+ break;
case SUBMODULE_TOPHEADER: {
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
-
- if (!CurrentModule)
- break;
-
CurrentModule->addTopHeaderFilename(Blob);
break;
}
case SUBMODULE_UMBRELLA_DIR: {
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
-
- if (!CurrentModule)
- break;
-
if (const DirectoryEntry *Umbrella
= PP.getFileManager().getDirectory(Blob)) {
if (!CurrentModule->getUmbrellaDir())
@@ -4412,12 +4575,6 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
case SUBMODULE_METADATA: {
- if (!First) {
- Error("submodule metadata record not at beginning of block");
- return Failure;
- }
- First = false;
-
F.BaseSubmoduleID = getTotalNumSubmodules();
F.LocalNumSubmodules = Record[0];
unsigned LocalBaseSubmoduleID = Record[1];
@@ -4431,21 +4588,13 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.SubmoduleRemap.insertOrReplace(
std::make_pair(LocalBaseSubmoduleID,
F.BaseSubmoduleID - LocalBaseSubmoduleID));
-
+
SubmodulesLoaded.resize(SubmodulesLoaded.size() + F.LocalNumSubmodules);
- }
+ }
break;
}
case SUBMODULE_IMPORTS: {
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
-
- if (!CurrentModule)
- break;
-
for (unsigned Idx = 0; Idx != Record.size(); ++Idx) {
UnresolvedModuleRef Unresolved;
Unresolved.File = &F;
@@ -4459,14 +4608,6 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
case SUBMODULE_EXPORTS: {
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
-
- if (!CurrentModule)
- break;
-
for (unsigned Idx = 0; Idx + 1 < Record.size(); Idx += 2) {
UnresolvedModuleRef Unresolved;
Unresolved.File = &F;
@@ -4483,53 +4624,21 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
}
case SUBMODULE_REQUIRES: {
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
-
- if (!CurrentModule)
- break;
-
CurrentModule->addRequirement(Blob, Record[0], Context.getLangOpts(),
Context.getTargetInfo());
break;
}
case SUBMODULE_LINK_LIBRARY:
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
-
- if (!CurrentModule)
- break;
-
CurrentModule->LinkLibraries.push_back(
Module::LinkLibrary(Blob, Record[0]));
break;
case SUBMODULE_CONFIG_MACRO:
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
-
- if (!CurrentModule)
- break;
-
CurrentModule->ConfigMacros.push_back(Blob.str());
break;
case SUBMODULE_CONFLICT: {
- if (First) {
- Error("missing submodule metadata record at beginning of block");
- return Failure;
- }
-
- if (!CurrentModule)
- break;
-
UnresolvedModuleRef Unresolved;
Unresolved.File = &F;
Unresolved.Mod = CurrentModule;
@@ -4553,7 +4662,8 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
/// \returns true if the listener deems the file unacceptable, false otherwise.
bool ASTReader::ParseLanguageOptions(const RecordData &Record,
bool Complain,
- ASTReaderListener &Listener) {
+ ASTReaderListener &Listener,
+ bool AllowCompatibleDifferences) {
LangOptions LangOpts;
unsigned Idx = 0;
#define LANGOPT(Name, Bits, Default, Description) \
@@ -4561,7 +4671,8 @@ bool ASTReader::ParseLanguageOptions(const RecordData &Record,
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
LangOpts.set##Name(static_cast<LangOptions::Type>(Record[Idx++]));
#include "clang/Basic/LangOptions.def"
-#define SANITIZER(NAME, ID) LangOpts.Sanitize.ID = Record[Idx++];
+#define SANITIZER(NAME, ID) \
+ LangOpts.Sanitize.set(SanitizerKind::ID, Record[Idx++]);
#include "clang/Basic/Sanitizers.def"
ObjCRuntime::Kind runtimeKind = (ObjCRuntime::Kind) Record[Idx++];
@@ -4581,7 +4692,8 @@ bool ASTReader::ParseLanguageOptions(const RecordData &Record,
}
LangOpts.CommentOpts.ParseAllComments = Record[Idx++];
- return Listener.ReadLanguageOptions(LangOpts, Complain);
+ return Listener.ReadLanguageOptions(LangOpts, Complain,
+ AllowCompatibleDifferences);
}
bool ASTReader::ParseTargetOptions(const RecordData &Record,
@@ -5235,17 +5347,19 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
/*produces*/ Record[5]);
unsigned Idx = 6;
- unsigned NumParams = Record[Idx++];
- SmallVector<QualType, 16> ParamTypes;
- for (unsigned I = 0; I != NumParams; ++I)
- ParamTypes.push_back(readType(*Loc.F, Record, Idx));
EPI.Variadic = Record[Idx++];
EPI.HasTrailingReturn = Record[Idx++];
EPI.TypeQuals = Record[Idx++];
EPI.RefQualifier = static_cast<RefQualifierKind>(Record[Idx++]);
SmallVector<QualType, 8> ExceptionStorage;
- readExceptionSpec(*Loc.F, ExceptionStorage, EPI, Record, Idx);
+ readExceptionSpec(*Loc.F, ExceptionStorage, EPI.ExceptionSpec, Record, Idx);
+
+ unsigned NumParams = Record[Idx++];
+ SmallVector<QualType, 16> ParamTypes;
+ for (unsigned I = 0; I != NumParams; ++I)
+ ParamTypes.push_back(readType(*Loc.F, Record, Idx));
+
return Context.getFunctionType(ResultType, ParamTypes, EPI);
}
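This hunk only moves the parameter-type reads after the exception-spec read because the writer emits the fields in that order too; reader and writer must always agree on field order. A tiny round trip showing why, with a made-up three-field record rather than the real FunctionProtoType layout:

#include <cassert>
#include <cstdint>
#include <vector>

struct Proto { uint64_t Variadic, TypeQuals, NumParams; };

// Writer and reader traverse fields in exactly the same order; swapping
// the order on only one side silently decodes the wrong values.
static void writeProto(const Proto &P, std::vector<uint64_t> &R) {
  R.push_back(P.Variadic);
  R.push_back(P.TypeQuals);
  R.push_back(P.NumParams);
}

static Proto readProto(const std::vector<uint64_t> &R, unsigned &Idx) {
  Proto P;
  P.Variadic = R[Idx++];
  P.TypeQuals = R[Idx++];
  P.NumParams = R[Idx++];
  return P;
}

int main() {
  std::vector<uint64_t> R;
  writeProto({1, 0, 3}, R);
  unsigned Idx = 0;
  Proto P = readProto(R, Idx);
  assert(P.Variadic == 1 && P.NumParams == 3);
}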
@@ -5414,13 +5528,18 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
QualType TST = readType(*Loc.F, Record, Idx); // probably derivable
// FIXME: ASTContext::getInjectedClassNameType is not currently suitable
// for AST reading, too much interdependencies.
- const Type *T;
- if (const Type *Existing = D->getTypeForDecl())
- T = Existing;
- else if (auto *Prev = D->getPreviousDecl())
- T = Prev->getTypeForDecl();
- else
+ const Type *T = nullptr;
+ for (auto *DI = D; DI; DI = DI->getPreviousDecl()) {
+ if (const Type *Existing = DI->getTypeForDecl()) {
+ T = Existing;
+ break;
+ }
+ }
+ if (!T) {
T = new (Context, TypeAlignment) InjectedClassNameType(D, TST);
+ for (auto *DI = D; DI; DI = DI->getPreviousDecl())
+ DI->setTypeForDecl(T);
+ }
return QualType(T, 0);
}
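The injected-class-name change walks the whole previous-declaration chain for a cached type and, when it has to allocate one, installs it on every declaration in the chain so later lookups short-circuit. The same idea on a toy redeclaration chain (Decl and Type here are stand-ins, and a static object stands in for the arena allocation):

#include <cassert>

struct Type { int Id; };
struct Decl {
  Decl *Previous = nullptr;
  const Type *TypeForDecl = nullptr;
};

// Look for a cached type anywhere in the chain; if none is found, create
// one and backfill it on every declaration.
static const Type *getOrCreateType(Decl *D) {
  for (Decl *DI = D; DI; DI = DI->Previous)
    if (const Type *Existing = DI->TypeForDecl)
      return Existing;
  static Type Fresh{42};            // stands in for the real allocation
  for (Decl *DI = D; DI; DI = DI->Previous)
    DI->TypeForDecl = &Fresh;
  return &Fresh;
}

int main() {
  Decl A, B;
  B.Previous = &A;
  const Type *T = getOrCreateType(&B);
  assert(A.TypeForDecl == T && B.TypeForDecl == T);
}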
@@ -5508,24 +5627,22 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
void ASTReader::readExceptionSpec(ModuleFile &ModuleFile,
SmallVectorImpl<QualType> &Exceptions,
- FunctionProtoType::ExtProtoInfo &EPI,
+ FunctionProtoType::ExceptionSpecInfo &ESI,
const RecordData &Record, unsigned &Idx) {
ExceptionSpecificationType EST =
static_cast<ExceptionSpecificationType>(Record[Idx++]);
- EPI.ExceptionSpecType = EST;
+ ESI.Type = EST;
if (EST == EST_Dynamic) {
- EPI.NumExceptions = Record[Idx++];
- for (unsigned I = 0; I != EPI.NumExceptions; ++I)
+ for (unsigned I = 0, N = Record[Idx++]; I != N; ++I)
Exceptions.push_back(readType(ModuleFile, Record, Idx));
- EPI.Exceptions = Exceptions.data();
+ ESI.Exceptions = Exceptions;
} else if (EST == EST_ComputedNoexcept) {
- EPI.NoexceptExpr = ReadExpr(ModuleFile);
+ ESI.NoexceptExpr = ReadExpr(ModuleFile);
} else if (EST == EST_Uninstantiated) {
- EPI.ExceptionSpecDecl = ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
- EPI.ExceptionSpecTemplate =
- ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
+ ESI.SourceDecl = ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
+ ESI.SourceTemplate = ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
} else if (EST == EST_Unevaluated) {
- EPI.ExceptionSpecDecl = ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
+ ESI.SourceDecl = ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
}
}
@@ -5976,18 +6093,10 @@ void ASTReader::CompleteRedeclChain(const Decl *D) {
const DeclContext *DC = D->getDeclContext()->getRedeclContext();
- // Recursively ensure that the decl context itself is complete
- // (in particular, this matters if the decl context is a namespace).
- //
- // FIXME: This should be performed by lookup instead of here.
- cast<Decl>(DC)->getMostRecentDecl();
-
// If this is a named declaration, complete it by looking it up
// within its context.
//
- // FIXME: We don't currently handle the cases where we can't do this;
- // merging a class definition that contains unnamed entities should merge
- // those entities. Likewise, merging a function definition should merge
+ // FIXME: Merging a function definition should merge
// all mergeable entities within it.
if (isa<TranslationUnitDecl>(DC) || isa<NamespaceDecl>(DC) ||
isa<CXXRecordDecl>(DC) || isa<EnumDecl>(DC)) {
@@ -6000,6 +6109,9 @@ void ASTReader::CompleteRedeclChain(const Decl *D) {
updateOutOfDateIdentifier(*II);
} else
DC->lookup(Name);
+ } else if (needsAnonymousDeclarationNumber(cast<NamedDecl>(D))) {
+ // FIXME: It'd be nice to do something a bit more targeted here.
+ D->getDeclContext()->decls_begin();
}
}
}
@@ -6345,13 +6457,13 @@ namespace {
/// declaration context.
class DeclContextNameLookupVisitor {
ASTReader &Reader;
- SmallVectorImpl<const DeclContext *> &Contexts;
+ ArrayRef<const DeclContext *> Contexts;
DeclarationName Name;
SmallVectorImpl<NamedDecl *> &Decls;
public:
- DeclContextNameLookupVisitor(ASTReader &Reader,
- SmallVectorImpl<const DeclContext *> &Contexts,
+ DeclContextNameLookupVisitor(ASTReader &Reader,
+ ArrayRef<const DeclContext *> Contexts,
DeclarationName Name,
SmallVectorImpl<NamedDecl *> &Decls)
: Reader(Reader), Contexts(Contexts), Name(Name), Decls(Decls) { }
@@ -6364,9 +6476,9 @@ namespace {
// this context in this module.
ModuleFile::DeclContextInfosMap::iterator Info;
bool FoundInfo = false;
- for (unsigned I = 0, N = This->Contexts.size(); I != N; ++I) {
- Info = M.DeclContextInfos.find(This->Contexts[I]);
- if (Info != M.DeclContextInfos.end() &&
+ for (auto *DC : This->Contexts) {
+ Info = M.DeclContextInfos.find(DC);
+ if (Info != M.DeclContextInfos.end() &&
Info->second.NameLookupTableData) {
FoundInfo = true;
break;
@@ -6375,7 +6487,7 @@ namespace {
if (!FoundInfo)
return false;
-
+
// Look for this name within this module.
ASTDeclContextNameLookupTable *LookupTable =
Info->second.NameLookupTableData;
@@ -6396,9 +6508,11 @@ namespace {
// currently read before reading its name. The lookup is triggered by
// building that decl (likely indirectly), and so it is later in the
// sense of "already existing" and can be ignored here.
+ // FIXME: This should not happen; deserializing declarations should
+ // not perform lookups since that can lead to deserialization cycles.
continue;
}
-
+
// Record this declaration.
FoundAnything = true;
This->Decls.push_back(ND);
@@ -6438,15 +6552,17 @@ ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC,
if (!Name)
return false;
+ Deserializing LookupResults(this);
+
SmallVector<NamedDecl *, 64> Decls;
-
+
// Compute the declaration contexts we need to look into. Multiple such
// declaration contexts occur when two declaration contexts from disjoint
// modules get merged, e.g., when two namespaces with the same name are
// independently defined in separate modules.
SmallVector<const DeclContext *, 2> Contexts;
Contexts.push_back(DC);
-
+
if (DC->isNamespace()) {
auto Merged = MergedDecls.find(const_cast<Decl *>(cast<Decl>(DC)));
if (Merged != MergedDecls.end()) {
@@ -6454,24 +6570,45 @@ ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC,
Contexts.push_back(cast<DeclContext>(GetDecl(Merged->second[I])));
}
}
- if (isa<CXXRecordDecl>(DC)) {
- auto Merged = MergedLookups.find(DC);
- if (Merged != MergedLookups.end())
- Contexts.insert(Contexts.end(), Merged->second.begin(),
- Merged->second.end());
- }
- DeclContextNameLookupVisitor Visitor(*this, Contexts, Name, Decls);
+ auto LookUpInContexts = [&](ArrayRef<const DeclContext*> Contexts) {
+ DeclContextNameLookupVisitor Visitor(*this, Contexts, Name, Decls);
- // If we can definitively determine which module file to look into,
- // only look there. Otherwise, look in all module files.
- ModuleFile *Definitive;
- if (Contexts.size() == 1 &&
- (Definitive = getDefinitiveModuleFileFor(DC, *this))) {
- DeclContextNameLookupVisitor::visit(*Definitive, &Visitor);
- } else {
- ModuleMgr.visit(&DeclContextNameLookupVisitor::visit, &Visitor);
+ // If we can definitively determine which module file to look into,
+ // only look there. Otherwise, look in all module files.
+ ModuleFile *Definitive;
+ if (Contexts.size() == 1 &&
+ (Definitive = getDefinitiveModuleFileFor(Contexts[0], *this))) {
+ DeclContextNameLookupVisitor::visit(*Definitive, &Visitor);
+ } else {
+ ModuleMgr.visit(&DeclContextNameLookupVisitor::visit, &Visitor);
+ }
+ };
+
+ LookUpInContexts(Contexts);
+
+ // If this might be an implicit special member function, then also search
+ // all merged definitions of the surrounding class. We need to search them
+ // individually, because finding an entity in one of them doesn't imply that
+ // we can't find a different entity in another one.
+ if (isa<CXXRecordDecl>(DC)) {
+ auto Kind = Name.getNameKind();
+ if (Kind == DeclarationName::CXXConstructorName ||
+ Kind == DeclarationName::CXXDestructorName ||
+ (Kind == DeclarationName::CXXOperatorName &&
+ Name.getCXXOverloadedOperator() == OO_Equal)) {
+ auto Merged = MergedLookups.find(DC);
+ if (Merged != MergedLookups.end()) {
+ for (unsigned I = 0; I != Merged->second.size(); ++I) {
+ LookUpInContexts(Merged->second[I]);
+ // We might have just added some more merged lookups. If so, our
+ // iterator is now invalid, so grab a fresh one before continuing.
+ Merged = MergedLookups.find(DC);
+ }
+ }
+ }
}
+
++NumVisibleDeclContextsRead;
SetExternalVisibleDeclsForName(DC, Name, Decls);
return !Decls.empty();
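The merged-lookup loop above deliberately re-runs MergedLookups.find(DC) after every visit, because visiting one context can add entries to the map (a DenseMap, whose insertions may invalidate iterators) and grow the vector being walked. A standalone sketch of that index-plus-refresh pattern over a std::map (names are illustrative):

#include <cassert>
#include <map>
#include <string>
#include <vector>

int main() {
  std::map<std::string, std::vector<int>> MergedLookups;
  MergedLookups["C"] = {1, 2};
  int Visited = 0;
  auto Merged = MergedLookups.find("C");
  // Index-based iteration, re-checking size() each round, tolerates growth;
  // re-running find() tolerates iterator invalidation in hash maps.
  for (unsigned I = 0; I != Merged->second.size(); ++I) {
    ++Visited;
    if (I == 0)
      MergedLookups["D"] = {3};       // side effect of visiting an entry
    Merged = MergedLookups.find("C"); // grab a fresh iterator before continuing
  }
  assert(Visited == 2);
}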
@@ -6786,11 +6923,11 @@ void ASTReader::InitializeSema(Sema &S) {
// Makes sure any declarations that were deserialized "too early"
// still get added to the identifier's declaration chains.
- for (unsigned I = 0, N = PreloadedDecls.size(); I != N; ++I) {
- pushExternalDeclIntoScope(PreloadedDecls[I],
- PreloadedDecls[I]->getDeclName());
+ for (uint64_t ID : PreloadedDeclIDs) {
+ NamedDecl *D = cast<NamedDecl>(GetDecl(ID));
+ pushExternalDeclIntoScope(D, D->getDeclName());
}
- PreloadedDecls.clear();
+ PreloadedDeclIDs.clear();
// FIXME: What happens if these are changed by a module import?
if (!FPPragmaOptions.empty()) {
@@ -6924,15 +7061,18 @@ namespace clang { namespace serialization {
unsigned PriorGeneration;
unsigned InstanceBits;
unsigned FactoryBits;
+ bool InstanceHasMoreThanOneDecl;
+ bool FactoryHasMoreThanOneDecl;
SmallVector<ObjCMethodDecl *, 4> InstanceMethods;
SmallVector<ObjCMethodDecl *, 4> FactoryMethods;
public:
- ReadMethodPoolVisitor(ASTReader &Reader, Selector Sel,
+ ReadMethodPoolVisitor(ASTReader &Reader, Selector Sel,
unsigned PriorGeneration)
- : Reader(Reader), Sel(Sel), PriorGeneration(PriorGeneration),
- InstanceBits(0), FactoryBits(0) { }
-
+ : Reader(Reader), Sel(Sel), PriorGeneration(PriorGeneration),
+ InstanceBits(0), FactoryBits(0), InstanceHasMoreThanOneDecl(false),
+ FactoryHasMoreThanOneDecl(false) {}
+
static bool visit(ModuleFile &M, void *UserData) {
ReadMethodPoolVisitor *This
= static_cast<ReadMethodPoolVisitor *>(UserData);
@@ -6966,6 +7106,8 @@ namespace clang { namespace serialization {
This->FactoryMethods.append(Data.Factory.begin(), Data.Factory.end());
This->InstanceBits = Data.InstanceBits;
This->FactoryBits = Data.FactoryBits;
+ This->InstanceHasMoreThanOneDecl = Data.InstanceHasMoreThanOneDecl;
+ This->FactoryHasMoreThanOneDecl = Data.FactoryHasMoreThanOneDecl;
return true;
}
@@ -6981,6 +7123,10 @@ namespace clang { namespace serialization {
unsigned getInstanceBits() const { return InstanceBits; }
unsigned getFactoryBits() const { return FactoryBits; }
+ bool instanceHasMoreThanOneDecl() const {
+ return InstanceHasMoreThanOneDecl;
+ }
+ bool factoryHasMoreThanOneDecl() const { return FactoryHasMoreThanOneDecl; }
};
} } // end namespace clang::serialization
@@ -7015,11 +7161,17 @@ void ASTReader::ReadMethodPool(Selector Sel) {
Sema &S = *getSema();
Sema::GlobalMethodPool::iterator Pos
= S.MethodPool.insert(std::make_pair(Sel, Sema::GlobalMethods())).first;
-
- addMethodsToPool(S, Visitor.getInstanceMethods(), Pos->second.first);
- addMethodsToPool(S, Visitor.getFactoryMethods(), Pos->second.second);
+
Pos->second.first.setBits(Visitor.getInstanceBits());
+ Pos->second.first.setHasMoreThanOneDecl(Visitor.instanceHasMoreThanOneDecl());
Pos->second.second.setBits(Visitor.getFactoryBits());
+ Pos->second.second.setHasMoreThanOneDecl(Visitor.factoryHasMoreThanOneDecl());
+
+ // Add methods to the global pool *after* setting hasMoreThanOneDecl, since
+ // when building a module we keep every method individually and may need to
+ // update hasMoreThanOneDecl as we add the methods.
+ addMethodsToPool(S, Visitor.getInstanceMethods(), Pos->second.first);
+ addMethodsToPool(S, Visitor.getFactoryMethods(), Pos->second.second);
}
void ASTReader::ReadKnownNamespaces(
@@ -7095,6 +7247,18 @@ void ASTReader::ReadDynamicClasses(SmallVectorImpl<CXXRecordDecl *> &Decls) {
DynamicClasses.clear();
}
+void ASTReader::ReadUnusedLocalTypedefNameCandidates(
+ llvm::SmallSetVector<const TypedefNameDecl *, 4> &Decls) {
+ for (unsigned I = 0, N = UnusedLocalTypedefNameCandidates.size(); I != N;
+ ++I) {
+ TypedefNameDecl *D = dyn_cast_or_null<TypedefNameDecl>(
+ GetDecl(UnusedLocalTypedefNameCandidates[I]));
+ if (D)
+ Decls.insert(D);
+ }
+ UnusedLocalTypedefNameCandidates.clear();
+}
+
void
ASTReader::ReadLocallyScopedExternCDecls(SmallVectorImpl<NamedDecl *> &Decls) {
for (unsigned I = 0, N = LocallyScopedExternCDecls.size(); I != N; ++I) {
@@ -7230,24 +7394,26 @@ ASTReader::SetGloballyVisibleDecls(IdentifierInfo *II,
}
for (unsigned I = 0, N = DeclIDs.size(); I != N; ++I) {
- NamedDecl *D = cast<NamedDecl>(GetDecl(DeclIDs[I]));
- if (SemaObj) {
- // If we're simply supposed to record the declarations, do so now.
- if (Decls) {
- Decls->push_back(D);
- continue;
- }
-
- // Introduce this declaration into the translation-unit scope
- // and add it to the declaration chain for this identifier, so
- // that (unqualified) name lookup will find it.
- pushExternalDeclIntoScope(D, II);
- } else {
+ if (!SemaObj) {
// Queue this declaration so that it will be added to the
// translation unit scope and identifier's declaration chain
// once a Sema object is known.
- PreloadedDecls.push_back(D);
+ PreloadedDeclIDs.push_back(DeclIDs[I]);
+ continue;
}
+
+ NamedDecl *D = cast<NamedDecl>(GetDecl(DeclIDs[I]));
+
+ // If we're simply supposed to record the declarations, do so now.
+ if (Decls) {
+ Decls->push_back(D);
+ continue;
+ }
+
+ // Introduce this declaration into the translation-unit scope
+ // and add it to the declaration chain for this identifier, so
+ // that (unqualified) name lookup will find it.
+ pushExternalDeclIntoScope(D, II);
}
}
@@ -7584,8 +7750,7 @@ ASTReader::ReadTemplateArgument(ModuleFile &F,
return TemplateArgument(readType(F, Record, Idx));
case TemplateArgument::Declaration: {
ValueDecl *D = ReadDeclAs<ValueDecl>(F, Record, Idx);
- bool ForReferenceParam = Record[Idx++];
- return TemplateArgument(D, ForReferenceParam);
+ return TemplateArgument(D, readType(F, Record, Idx));
}
case TemplateArgument::NullPtr:
return TemplateArgument(readType(F, Record, Idx), /*isNullPtr*/true);
@@ -7806,6 +7971,12 @@ ASTReader::ReadNestedNameSpecifier(ModuleFile &F,
// No associated value, and there can't be a prefix.
break;
}
+
+ case NestedNameSpecifier::Super: {
+ CXXRecordDecl *RD = ReadDeclAs<CXXRecordDecl>(F, Record, Idx);
+ NNS = NestedNameSpecifier::SuperSpecifier(Context, RD);
+ break;
+ }
}
Prev = NNS;
}
@@ -7862,9 +8033,16 @@ ASTReader::ReadNestedNameSpecifierLoc(ModuleFile &F, const RecordData &Record,
Builder.MakeGlobal(Context, ColonColonLoc);
break;
}
+
+ case NestedNameSpecifier::Super: {
+ CXXRecordDecl *RD = ReadDeclAs<CXXRecordDecl>(F, Record, Idx);
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+ Builder.MakeSuper(Context, RD, Range.getBegin(), Range.getEnd());
+ break;
+ }
}
}
-
+
return Builder.getWithLocInContext(Context);
}
@@ -7906,6 +8084,13 @@ std::string ASTReader::ReadString(const RecordData &Record, unsigned &Idx) {
return Result;
}
+std::string ASTReader::ReadPath(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx) {
+ std::string Filename = ReadString(Record, Idx);
+ ResolveImportedPath(F, Filename);
+ return Filename;
+}
+
VersionTuple ASTReader::ReadVersionTuple(const RecordData &Record,
unsigned &Idx) {
unsigned Major = Record[Idx++];
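ReadPath, added above, reads a stored string and then resolves it against the module file's directory, since paths are written module-relative when possible. A plain-string sketch of that resolution step, assuming POSIX-style absolute paths (no clang API involved):

#include <iostream>
#include <string>

// Relative stored paths are rejoined with the module's base directory;
// absolute paths pass through untouched.
static std::string resolveImportedPath(std::string Filename,
                                       const std::string &ModuleDir) {
  if (Filename.empty() || Filename[0] == '/' || ModuleDir.empty())
    return Filename;
  return ModuleDir + "/" + Filename;
}

int main() {
  std::cout << resolveImportedPath("sub/foo.h", "/mods/A") << "\n";
  // -> /mods/A/sub/foo.h
  std::cout << resolveImportedPath("/usr/include/foo.h", "/mods/A") << "\n";
  // -> /usr/include/foo.h
}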
@@ -8008,6 +8193,14 @@ void ASTReader::ReadComments() {
}
}
+void ASTReader::getInputFiles(ModuleFile &F,
+ SmallVectorImpl<serialization::InputFile> &Files) {
+ for (unsigned I = 0, E = F.InputFilesLoaded.size(); I != E; ++I) {
+ unsigned ID = I+1;
+ Files.push_back(getInputFile(F, ID));
+ }
+}
+
std::string ASTReader::getOwningModuleNameForDiagnostic(const Decl *D) {
// If we know the owning module, use it.
if (Module *M = D->getOwningModule())
@@ -8025,7 +8218,7 @@ void ASTReader::finishPendingActions() {
while (!PendingIdentifierInfos.empty() ||
!PendingIncompleteDeclChains.empty() || !PendingDeclChains.empty() ||
!PendingMacroIDs.empty() || !PendingDeclContextInfos.empty() ||
- !PendingUpdateRecords.empty() || !PendingOdrMergeChecks.empty()) {
+ !PendingUpdateRecords.empty()) {
// If any identifiers with corresponding top-level declarations have
// been loaded, load those declarations now.
typedef llvm::DenseMap<IdentifierInfo *, SmallVector<Decl *, 2> >
@@ -8073,14 +8266,16 @@ void ASTReader::finishPendingActions() {
for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx != NumIDs;
++IDIdx) {
const PendingMacroInfo &Info = GlobalIDs[IDIdx];
- if (Info.M->Kind != MK_Module)
+ if (Info.M->Kind != MK_ImplicitModule &&
+ Info.M->Kind != MK_ExplicitModule)
resolvePendingMacro(II, Info);
}
// Handle module imports.
for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx != NumIDs;
++IDIdx) {
const PendingMacroInfo &Info = GlobalIDs[IDIdx];
- if (Info.M->Kind == MK_Module)
+ if (Info.M->Kind == MK_ImplicitModule ||
+ Info.M->Kind == MK_ExplicitModule)
resolvePendingMacro(II, Info);
}
}
@@ -8097,110 +8292,36 @@ void ASTReader::finishPendingActions() {
}
// Perform any pending declaration updates.
- //
- // Don't do this if we have known-incomplete redecl chains: it relies on
- // being able to walk redeclaration chains.
- while (PendingDeclChains.empty() && !PendingUpdateRecords.empty()) {
+ while (!PendingUpdateRecords.empty()) {
auto Update = PendingUpdateRecords.pop_back_val();
ReadingKindTracker ReadingKind(Read_Decl, *this);
loadDeclUpdateRecords(Update.first, Update.second);
}
-
- // Trigger the import of the full definition of each class that had any
- // odr-merging problems, so we can produce better diagnostics for them.
- for (auto &Merge : PendingOdrMergeFailures) {
- Merge.first->buildLookup();
- Merge.first->decls_begin();
- Merge.first->bases_begin();
- Merge.first->vbases_begin();
- for (auto *RD : Merge.second) {
- RD->decls_begin();
- RD->bases_begin();
- RD->vbases_begin();
- }
- }
-
- // For each declaration from a merged context, check that the canonical
- // definition of that context also contains a declaration of the same
- // entity.
- while (!PendingOdrMergeChecks.empty()) {
- NamedDecl *D = PendingOdrMergeChecks.pop_back_val();
-
- // FIXME: Skip over implicit declarations for now. This matters for things
- // like implicitly-declared special member functions. This isn't entirely
- // correct; we can end up with multiple unmerged declarations of the same
- // implicit entity.
- if (D->isImplicit())
- continue;
-
- DeclContext *CanonDef = D->getDeclContext();
- DeclContext::lookup_result R = CanonDef->lookup(D->getDeclName());
-
- bool Found = false;
- const Decl *DCanon = D->getCanonicalDecl();
-
- llvm::SmallVector<const NamedDecl*, 4> Candidates;
- for (DeclContext::lookup_iterator I = R.begin(), E = R.end();
- !Found && I != E; ++I) {
- for (auto RI : (*I)->redecls()) {
- if (RI->getLexicalDeclContext() == CanonDef) {
- // This declaration is present in the canonical definition. If it's
- // in the same redecl chain, it's the one we're looking for.
- if (RI->getCanonicalDecl() == DCanon)
- Found = true;
- else
- Candidates.push_back(cast<NamedDecl>(RI));
- break;
- }
- }
- }
-
- if (!Found) {
- D->setInvalidDecl();
-
- std::string CanonDefModule =
- getOwningModuleNameForDiagnostic(cast<Decl>(CanonDef));
- Diag(D->getLocation(), diag::err_module_odr_violation_missing_decl)
- << D << getOwningModuleNameForDiagnostic(D)
- << CanonDef << CanonDefModule.empty() << CanonDefModule;
-
- if (Candidates.empty())
- Diag(cast<Decl>(CanonDef)->getLocation(),
- diag::note_module_odr_violation_no_possible_decls) << D;
- else {
- for (unsigned I = 0, N = Candidates.size(); I != N; ++I)
- Diag(Candidates[I]->getLocation(),
- diag::note_module_odr_violation_possible_decl)
- << Candidates[I];
- }
-
- DiagnosedOdrMergeFailures.insert(CanonDef);
- }
- }
}
// If we deserialized any C++ or Objective-C class definitions, any
// Objective-C protocol definitions, or any redeclarable templates, make sure
// that all redeclarations point to the definitions. Note that this can only
// happen now, after the redeclaration chains have been fully wired.
- for (llvm::SmallPtrSet<Decl *, 4>::iterator D = PendingDefinitions.begin(),
- DEnd = PendingDefinitions.end();
- D != DEnd; ++D) {
- if (TagDecl *TD = dyn_cast<TagDecl>(*D)) {
+ for (Decl *D : PendingDefinitions) {
+ if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
if (const TagType *TagT = dyn_cast<TagType>(TD->getTypeForDecl())) {
// Make sure that the TagType points at the definition.
const_cast<TagType*>(TagT)->decl = TD;
}
- if (auto RD = dyn_cast<CXXRecordDecl>(*D)) {
- for (auto R : RD->redecls())
+ if (auto RD = dyn_cast<CXXRecordDecl>(D)) {
+ for (auto R : RD->redecls()) {
+ assert((R == D) == R->isThisDeclarationADefinition() &&
+ "declaration thinks it's the definition but it isn't");
cast<CXXRecordDecl>(R)->DefinitionData = RD->DefinitionData;
+ }
}
continue;
}
- if (auto ID = dyn_cast<ObjCInterfaceDecl>(*D)) {
+ if (auto ID = dyn_cast<ObjCInterfaceDecl>(D)) {
// Make sure that the ObjCInterfaceType points at the definition.
const_cast<ObjCInterfaceType *>(cast<ObjCInterfaceType>(ID->TypeForDecl))
->Decl = ID;
@@ -8211,14 +8332,14 @@ void ASTReader::finishPendingActions() {
continue;
}
- if (auto PD = dyn_cast<ObjCProtocolDecl>(*D)) {
+ if (auto PD = dyn_cast<ObjCProtocolDecl>(D)) {
for (auto R : PD->redecls())
R->Data = PD->Data;
continue;
}
- auto RTD = cast<RedeclarableTemplateDecl>(*D)->getCanonicalDecl();
+ auto RTD = cast<RedeclarableTemplateDecl>(D)->getCanonicalDecl();
for (auto R : RTD->redecls())
R->Common = RTD->Common;
}
@@ -8243,10 +8364,108 @@ void ASTReader::finishPendingActions() {
MD->setLazyBody(PB->second);
}
PendingBodies.clear();
+}
+
+void ASTReader::diagnoseOdrViolations() {
+ if (PendingOdrMergeFailures.empty() && PendingOdrMergeChecks.empty())
+ return;
+
+ // Trigger the import of the full definition of each class that had any
+ // odr-merging problems, so we can produce better diagnostics for them.
+ // These updates may in turn find and diagnose some ODR failures, so take
+ // ownership of the set first.
+ auto OdrMergeFailures = std::move(PendingOdrMergeFailures);
+ PendingOdrMergeFailures.clear();
+ for (auto &Merge : OdrMergeFailures) {
+ Merge.first->buildLookup();
+ Merge.first->decls_begin();
+ Merge.first->bases_begin();
+ Merge.first->vbases_begin();
+ for (auto *RD : Merge.second) {
+ RD->decls_begin();
+ RD->bases_begin();
+ RD->vbases_begin();
+ }
+ }
+
+ // For each declaration from a merged context, check that the canonical
+ // definition of that context also contains a declaration of the same
+ // entity.
+ //
+ // Caution: this loop does things that might invalidate iterators into
+ // PendingOdrMergeChecks. Don't turn this into a range-based for loop!
+ while (!PendingOdrMergeChecks.empty()) {
+ NamedDecl *D = PendingOdrMergeChecks.pop_back_val();
+
+ // FIXME: Skip over implicit declarations for now. This matters for things
+ // like implicitly-declared special member functions. This isn't entirely
+ // correct; we can end up with multiple unmerged declarations of the same
+ // implicit entity.
+ if (D->isImplicit())
+ continue;
+
+ DeclContext *CanonDef = D->getDeclContext();
+
+ bool Found = false;
+ const Decl *DCanon = D->getCanonicalDecl();
+
+ for (auto RI : D->redecls()) {
+ if (RI->getLexicalDeclContext() == CanonDef) {
+ Found = true;
+ break;
+ }
+ }
+ if (Found)
+ continue;
+
+ llvm::SmallVector<const NamedDecl*, 4> Candidates;
+ DeclContext::lookup_result R = CanonDef->lookup(D->getDeclName());
+ for (DeclContext::lookup_iterator I = R.begin(), E = R.end();
+ !Found && I != E; ++I) {
+ for (auto RI : (*I)->redecls()) {
+ if (RI->getLexicalDeclContext() == CanonDef) {
+ // This declaration is present in the canonical definition. If it's
+ // in the same redecl chain, it's the one we're looking for.
+ if (RI->getCanonicalDecl() == DCanon)
+ Found = true;
+ else
+ Candidates.push_back(cast<NamedDecl>(RI));
+ break;
+ }
+ }
+ }
+
+ if (!Found) {
+ // The AST doesn't like TagDecls becoming invalid after they've been
+ // completed. We only really need to mark FieldDecls as invalid here.
+ if (!isa<TagDecl>(D))
+ D->setInvalidDecl();
+
+ std::string CanonDefModule =
+ getOwningModuleNameForDiagnostic(cast<Decl>(CanonDef));
+ Diag(D->getLocation(), diag::err_module_odr_violation_missing_decl)
+ << D << getOwningModuleNameForDiagnostic(D)
+ << CanonDef << CanonDefModule.empty() << CanonDefModule;
+
+ if (Candidates.empty())
+ Diag(cast<Decl>(CanonDef)->getLocation(),
+ diag::note_module_odr_violation_no_possible_decls) << D;
+ else {
+ for (unsigned I = 0, N = Candidates.size(); I != N; ++I)
+ Diag(Candidates[I]->getLocation(),
+ diag::note_module_odr_violation_possible_decl)
+ << Candidates[I];
+ }
+
+ DiagnosedOdrMergeFailures.insert(CanonDef);
+ }
+ }
// Issue any pending ODR-failure diagnostics.
- for (auto &Merge : PendingOdrMergeFailures) {
- if (!DiagnosedOdrMergeFailures.insert(Merge.first))
+ for (auto &Merge : OdrMergeFailures) {
+ // If we've already pointed out a specific problem with this class, don't
+ // bother issuing a general "something's different" diagnostic.
+ if (!DiagnosedOdrMergeFailures.insert(Merge.first).second)
continue;
bool Diagnosed = false;
@@ -8282,7 +8501,6 @@ void ASTReader::finishPendingActions() {
<< Merge.first;
}
}
- PendingOdrMergeFailures.clear();
}
void ASTReader::FinishedDeserializing() {
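diagnoseOdrViolations above takes ownership of PendingOdrMergeFailures before processing it, because triggering the import of full definitions can queue new failures into the same set. A minimal sketch of that move-then-drain worklist pattern (the worklist and items are toys):

#include <cassert>
#include <string>
#include <vector>

std::vector<std::string> Pending;        // shared worklist

// Processing an item may queue more work; moving the list out first means
// the loop walks a stable snapshot and anything added re-enters later.
static void drainOnce() {
  auto Work = std::move(Pending);
  Pending.clear();                       // make the moved-from state explicit
  for (auto &Item : Work)
    if (Item == "a")
      Pending.push_back("b");            // discovered while processing "a"
}

int main() {
  Pending = {"a"};
  drainOnce();
  assert(Pending.size() == 1 && Pending[0] == "b");
  drainOnce();
  assert(Pending.empty());
}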
@@ -8295,10 +8513,13 @@ void ASTReader::FinishedDeserializing() {
}
--NumCurrentElementsDeserializing;
- if (NumCurrentElementsDeserializing == 0 && Consumer) {
+ if (NumCurrentElementsDeserializing == 0) {
+ diagnoseOdrViolations();
+
// We are not in recursive loading, so it's safe to pass the "interesting"
// decls to the consumer.
- PassInterestingDeclsToConsumer();
+ if (Consumer)
+ PassInterestingDeclsToConsumer();
}
}
diff --git a/lib/Serialization/ASTReaderDecl.cpp b/lib/Serialization/ASTReaderDecl.cpp
index 9ed1bf97ec7a..a783183d2ef6 100644
--- a/lib/Serialization/ASTReaderDecl.cpp
+++ b/lib/Serialization/ASTReaderDecl.cpp
@@ -43,6 +43,9 @@ namespace clang {
const RecordData &Record;
unsigned &Idx;
TypeID TypeIDForTypeDecl;
+ unsigned AnonymousDeclNumber;
+ GlobalDeclID NamedDeclForTagDecl;
+ IdentifierInfo *TypedefNameForLinkage;
bool HasPendingBody;
@@ -106,6 +109,12 @@ namespace clang {
void MergeDefinitionData(CXXRecordDecl *D,
struct CXXRecordDecl::DefinitionData &NewDD);
+ static NamedDecl *getAnonymousDeclForMerging(ASTReader &Reader,
+ DeclContext *DC,
+ unsigned Index);
+ static void setAnonymousDeclForMerging(ASTReader &Reader, DeclContext *DC,
+ unsigned Index, NamedDecl *D);
+
/// \brief RAII class used to capture the first ID within a redeclaration
/// chain and to introduce it into the list of pending redeclaration chains
/// on destruction.
@@ -134,7 +143,7 @@ namespace clang {
~RedeclarableResult() {
if (FirstID && Owning && isRedeclarableDeclKind(DeclKind) &&
- Reader.PendingDeclChainsKnown.insert(FirstID))
+ Reader.PendingDeclChainsKnown.insert(FirstID).second)
Reader.PendingDeclChains.push_back(FirstID);
}
@@ -158,50 +167,59 @@ namespace clang {
NamedDecl *New;
NamedDecl *Existing;
mutable bool AddResult;
-
+
+ unsigned AnonymousDeclNumber;
+ IdentifierInfo *TypedefNameForLinkage;
+
void operator=(FindExistingResult&) LLVM_DELETED_FUNCTION;
-
+
public:
FindExistingResult(ASTReader &Reader)
- : Reader(Reader), New(nullptr), Existing(nullptr), AddResult(false) {}
+ : Reader(Reader), New(nullptr), Existing(nullptr), AddResult(false),
+ AnonymousDeclNumber(0), TypedefNameForLinkage(0) {}
+
+ FindExistingResult(ASTReader &Reader, NamedDecl *New, NamedDecl *Existing,
+ unsigned AnonymousDeclNumber,
+ IdentifierInfo *TypedefNameForLinkage)
+ : Reader(Reader), New(New), Existing(Existing), AddResult(true),
+ AnonymousDeclNumber(AnonymousDeclNumber),
+ TypedefNameForLinkage(TypedefNameForLinkage) {}
- FindExistingResult(ASTReader &Reader, NamedDecl *New, NamedDecl *Existing)
- : Reader(Reader), New(New), Existing(Existing), AddResult(true) { }
-
FindExistingResult(const FindExistingResult &Other)
- : Reader(Other.Reader), New(Other.New), Existing(Other.Existing),
- AddResult(Other.AddResult)
- {
+ : Reader(Other.Reader), New(Other.New), Existing(Other.Existing),
+ AddResult(Other.AddResult),
+ AnonymousDeclNumber(Other.AnonymousDeclNumber),
+ TypedefNameForLinkage(Other.TypedefNameForLinkage) {
Other.AddResult = false;
}
-
+
~FindExistingResult();
-
+
/// \brief Suppress the addition of this result into the known set of
/// names.
void suppress() { AddResult = false; }
-
+
operator NamedDecl*() const { return Existing; }
-
+
template<typename T>
operator T*() const { return dyn_cast_or_null<T>(Existing); }
};
-
+
FindExistingResult findExisting(NamedDecl *D);
-
+
public:
- ASTDeclReader(ASTReader &Reader, ModuleFile &F,
- DeclID thisDeclID,
- unsigned RawLocation,
- const RecordData &Record, unsigned &Idx)
- : Reader(Reader), F(F), ThisDeclID(thisDeclID),
- RawLocation(RawLocation), Record(Record), Idx(Idx),
- TypeIDForTypeDecl(0), HasPendingBody(false) { }
+ ASTDeclReader(ASTReader &Reader, ModuleFile &F, DeclID thisDeclID,
+ unsigned RawLocation, const RecordData &Record, unsigned &Idx)
+ : Reader(Reader), F(F), ThisDeclID(thisDeclID),
+ RawLocation(RawLocation), Record(Record), Idx(Idx),
+ TypeIDForTypeDecl(0), NamedDeclForTagDecl(0),
+ TypedefNameForLinkage(nullptr), HasPendingBody(false) {}
template <typename DeclT>
- static void attachPreviousDeclImpl(Redeclarable<DeclT> *D, Decl *Previous);
- static void attachPreviousDeclImpl(...);
- static void attachPreviousDecl(Decl *D, Decl *previous);
+ static void attachPreviousDeclImpl(ASTReader &Reader,
+ Redeclarable<DeclT> *D, Decl *Previous);
+ static void attachPreviousDeclImpl(ASTReader &Reader, ...);
+ static void attachPreviousDecl(ASTReader &Reader, Decl *D, Decl *Previous);
template <typename DeclT>
static void attachLatestDeclImpl(Redeclarable<DeclT> *D, Decl *Latest);
@@ -233,7 +251,7 @@ namespace clang {
void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
void VisitTypeDecl(TypeDecl *TD);
- void VisitTypedefNameDecl(TypedefNameDecl *TD);
+ RedeclarableResult VisitTypedefNameDecl(TypedefNameDecl *TD);
void VisitTypedefDecl(TypedefDecl *TD);
void VisitTypeAliasDecl(TypeAliasDecl *TD);
void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
@@ -359,6 +377,12 @@ void ASTDeclReader::Visit(Decl *D) {
if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
// We have a fully initialized TypeDecl. Read its type now.
TD->setTypeForDecl(Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull());
+
+ // If this is a tag declaration with a typedef name for linkage, it's safe
+ // to load that typedef now.
+ if (NamedDeclForTagDecl)
+ cast<TagDecl>(D)->NamedDeclOrQualifier =
+ cast<NamedDecl>(Reader.GetDecl(NamedDeclForTagDecl));
} else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
// if we have a fully initialized TypeDecl, we can safely read its type now.
ID->TypeForDecl = Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull();
@@ -446,6 +470,8 @@ void ASTDeclReader::VisitTranslationUnitDecl(TranslationUnitDecl *TU) {
void ASTDeclReader::VisitNamedDecl(NamedDecl *ND) {
VisitDecl(ND);
ND->setDeclName(Reader.ReadDeclarationName(F, Record, Idx));
+ if (needsAnonymousDeclarationNumber(ND))
+ AnonymousDeclNumber = Record[Idx++];
}
void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) {
@@ -455,7 +481,8 @@ void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) {
TypeIDForTypeDecl = Reader.getGlobalTypeID(F, Record[Idx++]);
}
-void ASTDeclReader::VisitTypedefNameDecl(TypedefNameDecl *TD) {
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitTypedefNameDecl(TypedefNameDecl *TD) {
RedeclarableResult Redecl = VisitRedeclarable(TD);
VisitTypeDecl(TD);
TypeSourceInfo *TInfo = GetTypeSourceInfo(Record, Idx);
@@ -464,15 +491,21 @@ void ASTDeclReader::VisitTypedefNameDecl(TypedefNameDecl *TD) {
TD->setModedTypeSourceInfo(TInfo, modedT);
} else
TD->setTypeSourceInfo(TInfo);
- mergeRedeclarable(TD, Redecl);
+ return Redecl;
}
void ASTDeclReader::VisitTypedefDecl(TypedefDecl *TD) {
- VisitTypedefNameDecl(TD);
+ RedeclarableResult Redecl = VisitTypedefNameDecl(TD);
+ mergeRedeclarable(TD, Redecl);
}
void ASTDeclReader::VisitTypeAliasDecl(TypeAliasDecl *TD) {
- VisitTypedefNameDecl(TD);
+ RedeclarableResult Redecl = VisitTypedefNameDecl(TD);
+ if (auto *Template = ReadDeclAs<TypeAliasTemplateDecl>(Record, Idx))
+ // Merged when we merge the template.
+ TD->setDescribedAliasTemplate(Template);
+ else
+ mergeRedeclarable(TD, Redecl);
}
ASTDeclReader::RedeclarableResult ASTDeclReader::VisitTagDecl(TagDecl *TD) {
@@ -481,18 +514,32 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitTagDecl(TagDecl *TD) {
TD->IdentifierNamespace = Record[Idx++];
TD->setTagKind((TagDecl::TagKind)Record[Idx++]);
- TD->setCompleteDefinition(Record[Idx++]);
+ if (!isa<CXXRecordDecl>(TD))
+ TD->setCompleteDefinition(Record[Idx++]);
TD->setEmbeddedInDeclarator(Record[Idx++]);
TD->setFreeStanding(Record[Idx++]);
TD->setCompleteDefinitionRequired(Record[Idx++]);
TD->setRBraceLoc(ReadSourceLocation(Record, Idx));
- if (Record[Idx++]) { // hasExtInfo
+ switch (Record[Idx++]) {
+ case 0:
+ break;
+ case 1: { // ExtInfo
TagDecl::ExtInfo *Info = new (Reader.getContext()) TagDecl::ExtInfo();
ReadQualifierInfo(*Info, Record, Idx);
TD->NamedDeclOrQualifier = Info;
- } else
- TD->NamedDeclOrQualifier = ReadDeclAs<NamedDecl>(Record, Idx);
+ break;
+ }
+ case 2: // TypedefNameForAnonDecl
+ NamedDeclForTagDecl = ReadDeclID(Record, Idx);
+ TypedefNameForLinkage = Reader.GetIdentifierInfo(F, Record, Idx);
+ break;
+ case 3: // DeclaratorForAnonDecl
+ NamedDeclForTagDecl = ReadDeclID(Record, Idx);
+ break;
+ default:
+ llvm_unreachable("unexpected tag info kind");
+ }
if (!isa<CXXRecordDecl>(TD))
mergeRedeclarable(TD, Redecl);
@@ -953,8 +1000,15 @@ void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) {
VisitDeclaratorDecl(FD);
FD->Mutable = Record[Idx++];
if (int BitWidthOrInitializer = Record[Idx++]) {
- FD->InitializerOrBitWidth.setInt(BitWidthOrInitializer - 1);
- FD->InitializerOrBitWidth.setPointer(Reader.ReadExpr(F));
+ FD->InitStorage.setInt(
+ static_cast<FieldDecl::InitStorageKind>(BitWidthOrInitializer - 1));
+ if (FD->InitStorage.getInt() == FieldDecl::ISK_CapturedVLAType) {
+ // Read captured variable length array.
+ FD->InitStorage.setPointer(
+ Reader.readType(F, Record, Idx).getAsOpaquePtr());
+ } else {
+ FD->InitStorage.setPointer(Reader.ReadExpr(F));
+ }
}
if (!FD->getDeclName()) {
if (FieldDecl *Tmpl = ReadDeclAs<FieldDecl>(Record, Idx))
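The field reader above stores a single integer that is zero when the field has neither a bit width nor an initializer and otherwise encodes the storage kind plus one. A toy encode/decode pair for that presence-plus-discriminator trick (the Kind enumerators are made up, not FieldDecl's real InitStorageKind values):

#include <cassert>
#include <cstdint>

enum Kind { BitWidth, Initializer, CapturedVLAType };

// One record entry carries both "is anything stored?" and which kind it is:
// zero means nothing, otherwise the kind is the stored value minus one.
static uint64_t encode(bool Present, Kind K) {
  return Present ? static_cast<uint64_t>(K) + 1 : 0;
}

static bool decode(uint64_t V, Kind &K) {
  if (V == 0)
    return false;            // field has neither bit width nor initializer
  K = static_cast<Kind>(V - 1);
  return true;
}

int main() {
  Kind K;
  assert(!decode(encode(false, BitWidth), K));
  assert(decode(encode(true, CapturedVLAType), K) && K == CapturedVLAType);
}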
@@ -1140,7 +1194,7 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
// any other module's anonymous namespaces, so don't attach the anonymous
// namespace at all.
NamespaceDecl *Anon = ReadDeclAs<NamespaceDecl>(Record, Idx);
- if (F.Kind != MK_Module)
+ if (F.Kind != MK_ImplicitModule && F.Kind != MK_ExplicitModule)
D->setAnonymousNamespace(Anon);
} else {
// Link this namespace back to the first declaration, which has already
@@ -1152,11 +1206,13 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
}
void ASTDeclReader::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarable(D);
VisitNamedDecl(D);
D->NamespaceLoc = ReadSourceLocation(Record, Idx);
D->IdentLoc = ReadSourceLocation(Record, Idx);
D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
D->Namespace = ReadDeclAs<NamedDecl>(Record, Idx);
+ mergeRedeclarable(D, Redecl);
}
void ASTDeclReader::VisitUsingDecl(UsingDecl *D) {
@@ -1168,6 +1224,7 @@ void ASTDeclReader::VisitUsingDecl(UsingDecl *D) {
D->setTypename(Record[Idx++]);
if (NamedDecl *Pattern = ReadDeclAs<NamedDecl>(Record, Idx))
Reader.getContext().setInstantiatedFromUsingDecl(D, Pattern);
+ mergeMergeable(D);
}
void ASTDeclReader::VisitUsingShadowDecl(UsingShadowDecl *D) {
@@ -1195,6 +1252,7 @@ void ASTDeclReader::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
D->setUsingLoc(ReadSourceLocation(Record, Idx));
D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
ReadDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record, Idx);
+ mergeMergeable(D);
}
void ASTDeclReader::VisitUnresolvedUsingTypenameDecl(
@@ -1202,6 +1260,7 @@ void ASTDeclReader::VisitUnresolvedUsingTypenameDecl(
VisitTypeDecl(D);
D->TypenameLocation = ReadSourceLocation(Record, Idx);
D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ mergeMergeable(D);
}
void ASTDeclReader::ReadCXXDefinitionData(
@@ -1279,6 +1338,7 @@ void ASTDeclReader::ReadCXXDefinitionData(
LambdaCaptureKind Kind = static_cast<LambdaCaptureKind>(Record[Idx++]);
switch (Kind) {
case LCK_This:
+ case LCK_VLAType:
*ToCapture++ = Capture(Loc, IsImplicit, Kind, nullptr,SourceLocation());
break;
case LCK_ByCopy:
@@ -1300,8 +1360,11 @@ void ASTDeclReader::MergeDefinitionData(
// If the new definition has new special members, let the name lookup
// code know that it needs to look in the new definition too.
- if ((MergeDD.DeclaredSpecialMembers & ~DD.DeclaredSpecialMembers) &&
- DD.Definition != MergeDD.Definition) {
+ //
+ // FIXME: We only need to do this if the merged definition declares members
+ // that this definition did not declare, or if it defines members that this
+ // definition did not define.
+ if (MergeDD.DeclaredSpecialMembers && DD.Definition != MergeDD.Definition) {
Reader.MergedLookups[DD.Definition].push_back(MergeDD.Definition);
DD.Definition->setHasExternalVisibleStorage();
}
@@ -1493,12 +1556,19 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
void ASTDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) {
VisitFunctionDecl(D);
+
unsigned NumOverridenMethods = Record[Idx++];
- while (NumOverridenMethods--) {
- // Avoid invariant checking of CXXMethodDecl::addOverriddenMethod,
- // MD may be initializing.
- if (CXXMethodDecl *MD = ReadDeclAs<CXXMethodDecl>(Record, Idx))
- Reader.getContext().addOverriddenMethod(D, MD);
+ if (D->isCanonicalDecl()) {
+ while (NumOverridenMethods--) {
+ // Avoid invariant checking of CXXMethodDecl::addOverriddenMethod,
+ // MD may be initializing.
+ if (CXXMethodDecl *MD = ReadDeclAs<CXXMethodDecl>(Record, Idx))
+ Reader.getContext().addOverriddenMethod(D, MD->getCanonicalDecl());
+ }
+ } else {
+ // We don't care about which declarations this used to override; we get
+ // the relevant information from the canonical declaration.
+ Idx += NumOverridenMethods;
}
}
@@ -2015,6 +2085,8 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase,
T *D = static_cast<T*>(DBase);
T *DCanon = D->getCanonicalDecl();
if (D != DCanon &&
+ // IDs < NUM_PREDEF_DECL_IDS are not loaded from an AST file.
+ Redecl.getFirstID() >= NUM_PREDEF_DECL_IDS &&
(!Reader.getContext().getLangOpts().Modules ||
Reader.getOwningModuleFile(DCanon) == Reader.getOwningModuleFile(D))) {
// All redeclarations between this declaration and its originally-canonical
@@ -2075,6 +2147,9 @@ void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D,
Result);
if (auto *DVar = dyn_cast<VarDecl>(DPattern))
return mergeRedeclarable(DVar, cast<VarDecl>(ExistingPattern), Result);
+ if (auto *DAlias = dyn_cast<TypeAliasDecl>(DPattern))
+ return mergeRedeclarable(DAlias, cast<TypedefNameDecl>(ExistingPattern),
+ Result);
llvm_unreachable("merged an unknown kind of redeclarable template");
}
@@ -2199,7 +2274,8 @@ static bool isConsumerInterestedIn(Decl *D, bool HasBody) {
if (isa<FileScopeAsmDecl>(D) ||
isa<ObjCProtocolDecl>(D) ||
isa<ObjCImplDecl>(D) ||
- isa<ImportDecl>(D))
+ isa<ImportDecl>(D) ||
+ isa<OMPThreadPrivateDecl>(D))
return true;
if (VarDecl *Var = dyn_cast<VarDecl>(D))
return Var->isFileVarDecl() &&
@@ -2269,6 +2345,53 @@ static bool isSameTemplateParameter(const NamedDecl *X,
TY->getTemplateParameters());
}
+static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
+ if (auto *NS = X->getAsNamespace())
+ return NS;
+ if (auto *NAS = X->getAsNamespaceAlias())
+ return NAS->getNamespace();
+ return nullptr;
+}
+
+static bool isSameQualifier(const NestedNameSpecifier *X,
+ const NestedNameSpecifier *Y) {
+ if (auto *NSX = getNamespace(X)) {
+ auto *NSY = getNamespace(Y);
+ if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
+ return false;
+ } else if (X->getKind() != Y->getKind())
+ return false;
+
+ // FIXME: For namespaces and types, we're permitted to check that the entity
+ // is named via the same tokens. We should probably do so.
+ switch (X->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ if (X->getAsIdentifier() != Y->getAsIdentifier())
+ return false;
+ break;
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ // We've already checked that we named the same namespace.
+ break;
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ if (X->getAsType()->getCanonicalTypeInternal() !=
+ Y->getAsType()->getCanonicalTypeInternal())
+ return false;
+ break;
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ return true;
+ }
+
+ // Recurse into earlier portion of NNS, if any.
+ auto *PX = X->getPrefix();
+ auto *PY = Y->getPrefix();
+ if (PX && PY)
+ return isSameQualifier(PX, PY);
+ return !PX && !PY;
+}
+
/// \brief Determine whether two template parameter lists are similar enough
/// that they may be used in declarations of the same template.
static bool isSameTemplateParameterList(const TemplateParameterList *X,
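The isSameQualifier helper added above compares two nested-name-specifiers component by component and then recurses into their prefixes, requiring that both chains end at the same depth. The same structure on a toy qualifier chain (Qualifier is a stand-in type, not clang's NestedNameSpecifier):

#include <cassert>
#include <string>

// Toy nested-name-specifier: a linked chain of components, outermost last.
struct Qualifier {
  std::string Name;
  const Qualifier *Prefix;
};

// Two qualifiers match if the current components match and their prefixes
// either both exist and match recursively, or are both absent.
static bool isSameQualifier(const Qualifier *X, const Qualifier *Y) {
  if (X->Name != Y->Name)
    return false;
  const Qualifier *PX = X->Prefix, *PY = Y->Prefix;
  if (PX && PY)
    return isSameQualifier(PX, PY);
  return !PX && !PY;
}

int main() {
  Qualifier A{"A", nullptr}, B{"B", &A};
  Qualifier A2{"A", nullptr}, B2{"B", &A2}, C{"B", nullptr};
  assert(isSameQualifier(&B, &B2));
  assert(!isSameQualifier(&B, &C));  // one has a prefix, the other does not
}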
@@ -2286,10 +2409,10 @@ static bool isSameTemplateParameterList(const TemplateParameterList *X,
/// \brief Determine whether the two declarations refer to the same entity.
static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
assert(X->getDeclName() == Y->getDeclName() && "Declaration name mismatch!");
-
+
if (X == Y)
return true;
-
+
// Must be in the same context.
if (!X->getDeclContext()->getRedeclContext()->Equals(
Y->getDeclContext()->getRedeclContext()))
@@ -2301,11 +2424,11 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
if (TypedefNameDecl *TypedefY = dyn_cast<TypedefNameDecl>(Y))
return X->getASTContext().hasSameType(TypedefX->getUnderlyingType(),
TypedefY->getUnderlyingType());
-
+
// Must have the same kind.
if (X->getKind() != Y->getKind())
return false;
-
+
// Objective-C classes and protocols with the same name always match.
if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
return true;
@@ -2327,8 +2450,8 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
}
// Functions with the same type and linkage match.
- // FIXME: This needs to cope with function template specializations,
- // merging of prototyped/non-prototyped functions, etc.
+ // FIXME: This needs to cope with merging of prototyped/non-prototyped
+ // functions, etc.
if (FunctionDecl *FuncX = dyn_cast<FunctionDecl>(X)) {
FunctionDecl *FuncY = cast<FunctionDecl>(Y);
return (FuncX->getLinkageInternal() == FuncY->getLinkageInternal()) &&
@@ -2361,7 +2484,6 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
// Fields with the same name and the same type match.
if (FieldDecl *FDX = dyn_cast<FieldDecl>(X)) {
FieldDecl *FDY = cast<FieldDecl>(Y);
- // FIXME: Diagnose if the types don't match.
// FIXME: Also check the bitwidth is odr-equivalent, if any.
return X->getASTContext().hasSameType(FDX->getType(), FDY->getType());
}
@@ -2377,6 +2499,30 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
return USX->getTargetDecl() == USY->getTargetDecl();
}
+ // Using declarations with the same qualifier match. (We already know that
+ // the name matches.)
+ if (auto *UX = dyn_cast<UsingDecl>(X)) {
+ auto *UY = cast<UsingDecl>(Y);
+ return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
+ UX->hasTypename() == UY->hasTypename() &&
+ UX->isAccessDeclaration() == UY->isAccessDeclaration();
+ }
+ if (auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
+ auto *UY = cast<UnresolvedUsingValueDecl>(Y);
+ return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
+ UX->isAccessDeclaration() == UY->isAccessDeclaration();
+ }
+ if (auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X))
+ return isSameQualifier(
+ UX->getQualifier(),
+ cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
+
+ // Namespace alias definitions with the same target match.
+ if (auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
+ auto *NAY = cast<NamespaceAliasDecl>(Y);
+ return NAX->getNamespace()->Equals(NAY->getNamespace());
+ }
+
// FIXME: Many other cases to implement.
return false;
}
@@ -2387,8 +2533,14 @@ static DeclContext *getPrimaryContextForMerging(DeclContext *DC) {
if (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC))
return ND->getOriginalNamespace();
+ // There is one tricky case here: if DC is a class with no definition, then
+ // we're merging a declaration whose definition is added by an update record,
+ // but we've not yet loaded that update record. In this case, we use the
+ // canonical declaration for merging until we get a real definition.
+ // FIXME: When we add a definition, we may need to move the partial lookup
+ // information from the canonical declaration onto the chosen definition.
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC))
- return RD->getDefinition();
+ return RD->getPrimaryContext();
if (EnumDecl *ED = dyn_cast<EnumDecl>(DC))
return ED->getASTContext().getLangOpts().CPlusPlus? ED->getDefinition()
@@ -2401,9 +2553,17 @@ ASTDeclReader::FindExistingResult::~FindExistingResult() {
if (!AddResult || Existing)
return;
+ DeclarationName Name = New->getDeclName();
DeclContext *DC = New->getDeclContext()->getRedeclContext();
- if (DC->isTranslationUnit() && Reader.SemaObj) {
- Reader.SemaObj->IdResolver.tryAddTopLevelDecl(New, New->getDeclName());
+ if (TypedefNameForLinkage) {
+ Reader.ImportedTypedefNamesForLinkage.insert(
+ std::make_pair(std::make_pair(DC, TypedefNameForLinkage), New));
+ } else if (!Name) {
+ assert(needsAnonymousDeclarationNumber(New));
+ setAnonymousDeclForMerging(Reader, New->getLexicalDeclContext(),
+ AnonymousDeclNumber, New);
+ } else if (DC->isTranslationUnit() && Reader.SemaObj) {
+ Reader.SemaObj->IdResolver.tryAddTopLevelDecl(New, Name);
} else if (DeclContext *MergeDC = getPrimaryContextForMerging(DC)) {
// Add the declaration to its redeclaration context so later merging
// lookups will find it.
@@ -2411,11 +2571,81 @@ ASTDeclReader::FindExistingResult::~FindExistingResult() {
}
}
+/// Find the declaration that should be merged into, given the declaration found
+/// by name lookup. If we're merging an anonymous declaration within a typedef,
+/// we need a matching typedef, and we merge with the type inside it.
+static NamedDecl *getDeclForMerging(NamedDecl *Found,
+ bool IsTypedefNameForLinkage) {
+ if (!IsTypedefNameForLinkage)
+ return Found;
+
+ // If we found a typedef declaration that gives a name to some other
+ // declaration, then we want that inner declaration. Declarations from
+ // AST files are handled via ImportedTypedefNamesForLinkage.
+  if (Found->isFromASTFile()) return nullptr;
+ if (auto *TND = dyn_cast<TypedefNameDecl>(Found)) {
+ if (auto *TT = TND->getTypeSourceInfo()->getType()->getAs<TagType>())
+ if (TT->getDecl()->getTypedefNameForAnonDecl() == TND)
+ return TT->getDecl();
+ }
+
+  return nullptr;
+}
+
+NamedDecl *ASTDeclReader::getAnonymousDeclForMerging(ASTReader &Reader,
+ DeclContext *DC,
+ unsigned Index) {
+ // If the lexical context has been merged, look into the now-canonical
+ // definition.
+ if (auto *Merged = Reader.MergedDeclContexts.lookup(DC))
+ DC = Merged;
+
+ // If we've seen this before, return the canonical declaration.
+ auto &Previous = Reader.AnonymousDeclarationsForMerging[DC];
+ if (Index < Previous.size() && Previous[Index])
+ return Previous[Index];
+
+ // If this is the first time, but we have parsed a declaration of the context,
+ // build the anonymous declaration list from the parsed declaration.
+ if (!cast<Decl>(DC)->isFromASTFile()) {
+ unsigned Index = 0;
+ for (Decl *LexicalD : DC->decls()) {
+ auto *ND = dyn_cast<NamedDecl>(LexicalD);
+ if (!ND || !needsAnonymousDeclarationNumber(ND))
+ continue;
+ if (Previous.size() == Index)
+ Previous.push_back(cast<NamedDecl>(ND->getCanonicalDecl()));
+ else
+ Previous[Index] = cast<NamedDecl>(ND->getCanonicalDecl());
+ ++Index;
+ }
+ }
+
+ return Index < Previous.size() ? Previous[Index] : nullptr;
+}
+
+void ASTDeclReader::setAnonymousDeclForMerging(ASTReader &Reader,
+ DeclContext *DC, unsigned Index,
+ NamedDecl *D) {
+ if (auto *Merged = Reader.MergedDeclContexts.lookup(DC))
+ DC = Merged;
+
+ auto &Previous = Reader.AnonymousDeclarationsForMerging[DC];
+ if (Index >= Previous.size())
+ Previous.resize(Index + 1);
+ if (!Previous[Index])
+ Previous[Index] = D;
+}
+
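// An illustrative, stand-alone sketch (toy types, not the clang AST) of the
// numbering scheme used by get/setAnonymousDeclForMerging above: anonymous
// declarations carry no name, so two copies of the same DeclContext are
// matched positionally; the Nth anonymous member of one copy is merged with
// the Nth anonymous member of the other.
#include <cassert>
#include <string>
#include <vector>

namespace sketch {
struct ToyDecl { std::string Name; };            // empty Name == anonymous

// Collect the anonymous members in declaration order; the vector index plays
// the role of AnonymousDeclNumber.
std::vector<const ToyDecl *> anonymousByIndex(const std::vector<ToyDecl> &DC) {
  std::vector<const ToyDecl *> ByIndex;
  for (const ToyDecl &D : DC)
    if (D.Name.empty())
      ByIndex.push_back(&D);
  return ByIndex;
}
} // namespace sketch

int main() {
  using namespace sketch;
  std::vector<ToyDecl> CopyA = {{"x"}, {""}, {"y"}, {""}};
  std::vector<ToyDecl> CopyB = {{"x"}, {""}, {"y"}, {""}};
  // Anonymous member #1 of CopyA is merged with anonymous member #1 of CopyB.
  assert(anonymousByIndex(CopyA)[1] == &CopyA[3]);
  assert(anonymousByIndex(CopyB)[1] == &CopyB[3]);
  return 0;
}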
ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
- DeclarationName Name = D->getDeclName();
- if (!Name) {
- // Don't bother trying to find unnamed declarations.
- FindExistingResult Result(Reader, D, /*Existing=*/nullptr);
+ DeclarationName Name = TypedefNameForLinkage ? TypedefNameForLinkage
+ : D->getDeclName();
+
+ if (!Name && !needsAnonymousDeclarationNumber(D)) {
+ // Don't bother trying to find unnamed declarations that are in
+ // unmergeable contexts.
+ FindExistingResult Result(Reader, D, /*Existing=*/nullptr,
+ AnonymousDeclNumber, TypedefNameForLinkage);
// FIXME: We may still need to pull in the redeclaration chain; there can
// be redeclarations via 'decltype'.
Result.suppress();
@@ -2426,7 +2656,27 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
// necessary merging already.
DeclContext *DC = D->getDeclContext()->getRedeclContext();
- if (DC->isTranslationUnit() && Reader.SemaObj) {
+ if (TypedefNameForLinkage) {
+ auto It = Reader.ImportedTypedefNamesForLinkage.find(
+ std::make_pair(DC, TypedefNameForLinkage));
+ if (It != Reader.ImportedTypedefNamesForLinkage.end())
+ if (isSameEntity(It->second, D))
+ return FindExistingResult(Reader, D, It->second, AnonymousDeclNumber,
+ TypedefNameForLinkage);
+ // Go on to check in other places in case an existing typedef name
+ // was not imported.
+ }
+
+ if (!Name) {
+ // This is an anonymous declaration that we may need to merge. Look it up
+ // in its context by number.
+ assert(needsAnonymousDeclarationNumber(D));
+ if (auto *Existing = getAnonymousDeclForMerging(
+ Reader, D->getLexicalDeclContext(), AnonymousDeclNumber))
+ if (isSameEntity(Existing, D))
+ return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
+ TypedefNameForLinkage);
+ } else if (DC->isTranslationUnit() && Reader.SemaObj) {
IdentifierResolver &IdResolver = Reader.SemaObj->IdResolver;
// Temporarily consider the identifier to be up-to-date. We don't want to
@@ -2455,14 +2705,18 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
for (IdentifierResolver::iterator I = IdResolver.begin(Name),
IEnd = IdResolver.end();
I != IEnd; ++I) {
- if (isSameEntity(*I, D))
- return FindExistingResult(Reader, D, *I);
+ if (NamedDecl *Existing = getDeclForMerging(*I, TypedefNameForLinkage))
+ if (isSameEntity(Existing, D))
+ return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
+ TypedefNameForLinkage);
}
} else if (DeclContext *MergeDC = getPrimaryContextForMerging(DC)) {
DeclContext::lookup_result R = MergeDC->noload_lookup(Name);
for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; ++I) {
- if (isSameEntity(*I, D))
- return FindExistingResult(Reader, D, *I);
+ if (NamedDecl *Existing = getDeclForMerging(*I, TypedefNameForLinkage))
+ if (isSameEntity(Existing, D))
+ return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
+ TypedefNameForLinkage);
}
} else {
// Not in a mergeable context.
@@ -2474,29 +2728,78 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
//
// FIXME: We should do something similar if we merge two definitions of the
// same template specialization into the same CXXRecordDecl.
- if (Reader.MergedDeclContexts.count(D->getLexicalDeclContext()))
+ auto MergedDCIt = Reader.MergedDeclContexts.find(D->getLexicalDeclContext());
+ if (MergedDCIt != Reader.MergedDeclContexts.end() &&
+ MergedDCIt->second == D->getDeclContext())
Reader.PendingOdrMergeChecks.push_back(D);
- return FindExistingResult(Reader, D, /*Existing=*/nullptr);
+ return FindExistingResult(Reader, D, /*Existing=*/nullptr,
+ AnonymousDeclNumber, TypedefNameForLinkage);
}
template<typename DeclT>
-void ASTDeclReader::attachPreviousDeclImpl(Redeclarable<DeclT> *D,
+void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
+ Redeclarable<DeclT> *D,
Decl *Previous) {
D->RedeclLink.setPrevious(cast<DeclT>(Previous));
}
-void ASTDeclReader::attachPreviousDeclImpl(...) {
+namespace clang {
+template<>
+void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
+ Redeclarable<FunctionDecl> *D,
+ Decl *Previous) {
+ FunctionDecl *FD = static_cast<FunctionDecl*>(D);
+ FunctionDecl *PrevFD = cast<FunctionDecl>(Previous);
+
+ FD->RedeclLink.setPrevious(PrevFD);
+
+ // If the previous declaration is an inline function declaration, then this
+ // declaration is too.
+ if (PrevFD->IsInline != FD->IsInline) {
+ // FIXME: [dcl.fct.spec]p4:
+ // If a function with external linkage is declared inline in one
+ // translation unit, it shall be declared inline in all translation
+ // units in which it appears.
+ //
+ // Be careful of this case:
+ //
+ // module A:
+ // template<typename T> struct X { void f(); };
+ // template<typename T> inline void X<T>::f() {}
+ //
+ // module B instantiates the declaration of X<int>::f
+ // module C instantiates the definition of X<int>::f
+ //
+ // If module B and C are merged, we do not have a violation of this rule.
+ FD->IsInline = true;
+ }
+
+ // If this declaration has an unresolved exception specification but the
+ // previous declaration had a resolved one, resolve the exception
+ // specification now.
+ auto *FPT = FD->getType()->getAs<FunctionProtoType>();
+ auto *PrevFPT = PrevFD->getType()->getAs<FunctionProtoType>();
+ if (FPT && PrevFPT &&
+ isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
+ !isUnresolvedExceptionSpec(PrevFPT->getExceptionSpecType())) {
+ Reader.Context.adjustExceptionSpec(
+ FD, PrevFPT->getExtProtoInfo().ExceptionSpec);
+ }
+}
+}
+void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, ...) {
llvm_unreachable("attachPreviousDecl on non-redeclarable declaration");
}
-void ASTDeclReader::attachPreviousDecl(Decl *D, Decl *Previous) {
+void ASTDeclReader::attachPreviousDecl(ASTReader &Reader, Decl *D,
+ Decl *Previous) {
assert(D && Previous);
switch (D->getKind()) {
#define ABSTRACT_DECL(TYPE)
-#define DECL(TYPE, BASE) \
- case Decl::TYPE: \
- attachPreviousDeclImpl(cast<TYPE##Decl>(D), Previous); \
+#define DECL(TYPE, BASE) \
+ case Decl::TYPE: \
+ attachPreviousDeclImpl(Reader, cast<TYPE##Decl>(D), Previous); \
break;
#include "clang/AST/DeclNodes.inc"
}
@@ -2514,32 +2817,6 @@ void ASTDeclReader::attachPreviousDecl(Decl *D, Decl *Previous) {
// be too.
if (Previous->Used)
D->Used = true;
-
- // If the previous declaration is an inline function declaration, then this
- // declaration is too.
- if (auto *FD = dyn_cast<FunctionDecl>(D)) {
- if (cast<FunctionDecl>(Previous)->IsInline != FD->IsInline) {
- // FIXME: [dcl.fct.spec]p4:
- // If a function with external linkage is declared inline in one
- // translation unit, it shall be declared inline in all translation
- // units in which it appears.
- //
- // Be careful of this case:
- //
- // module A:
- // template<typename T> struct X { void f(); };
- // template<typename T> inline void X<T>::f() {}
- //
- // module B instantiates the declaration of X<int>::f
- // module C instantiates the definition of X<int>::f
- //
- // If module B and C are merged, we do not have a violation of this rule.
- //
- //if (!FD->IsInline || Previous->getOwningModule())
- // Diag(FD->getLocation(), diag::err_odr_differing_inline);
- FD->IsInline = true;
- }
- }
}
template<typename DeclT>
@@ -2593,11 +2870,11 @@ ASTReader::combineStoredMergedDecls(Decl *Canon, GlobalDeclID CanonID) {
// Append the stored merged declarations to the merged declarations set.
MergedDeclsMap::iterator Pos = MergedDecls.find(Canon);
if (Pos == MergedDecls.end())
- Pos = MergedDecls.insert(std::make_pair(Canon,
+ Pos = MergedDecls.insert(std::make_pair(Canon,
SmallVector<DeclID, 2>())).first;
Pos->second.append(StoredPos->second.begin(), StoredPos->second.end());
StoredMergedDecls.erase(StoredPos);
-
+
// Sort and uniquify the set of merged declarations.
llvm::array_pod_sort(Pos->second.begin(), Pos->second.end());
Pos->second.erase(std::unique(Pos->second.begin(), Pos->second.end()),
@@ -2927,13 +3204,13 @@ namespace {
class RedeclChainVisitor {
ASTReader &Reader;
SmallVectorImpl<DeclID> &SearchDecls;
- llvm::SmallPtrSet<Decl *, 16> &Deserialized;
+ llvm::SmallPtrSetImpl<Decl *> &Deserialized;
GlobalDeclID CanonID;
SmallVector<Decl *, 4> Chain;
public:
RedeclChainVisitor(ASTReader &Reader, SmallVectorImpl<DeclID> &SearchDecls,
- llvm::SmallPtrSet<Decl *, 16> &Deserialized,
+ llvm::SmallPtrSetImpl<Decl *> &Deserialized,
GlobalDeclID CanonID)
: Reader(Reader), SearchDecls(SearchDecls), Deserialized(Deserialized),
CanonID(CanonID) {
@@ -3038,7 +3315,7 @@ void ASTReader::loadPendingDeclChain(serialization::GlobalDeclID ID) {
if (Chain[I] == CanonDecl)
continue;
- ASTDeclReader::attachPreviousDecl(Chain[I], MostRecent);
+ ASTDeclReader::attachPreviousDecl(*this, Chain[I], MostRecent);
MostRecent = Chain[I];
}
@@ -3052,7 +3329,7 @@ namespace {
ASTReader &Reader;
serialization::GlobalDeclID InterfaceID;
ObjCInterfaceDecl *Interface;
- llvm::SmallPtrSet<ObjCCategoryDecl *, 16> &Deserialized;
+ llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized;
unsigned PreviousGeneration;
ObjCCategoryDecl *Tail;
llvm::DenseMap<DeclarationName, ObjCCategoryDecl *> NameCategoryMap;
@@ -3100,7 +3377,7 @@ namespace {
ObjCCategoriesVisitor(ASTReader &Reader,
serialization::GlobalDeclID InterfaceID,
ObjCInterfaceDecl *Interface,
- llvm::SmallPtrSet<ObjCCategoryDecl *, 16> &Deserialized,
+ llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized,
unsigned PreviousGeneration)
: Reader(Reader), InterfaceID(InterfaceID), Interface(Interface),
Deserialized(Deserialized), PreviousGeneration(PreviousGeneration),
@@ -3168,13 +3445,80 @@ void ASTReader::loadObjCCategories(serialization::GlobalDeclID ID,
ModuleMgr.visit(ObjCCategoriesVisitor::visit, &Visitor);
}
+namespace {
+/// Iterator over the redeclarations of a declaration that have already
+/// been merged into the same redeclaration chain.
+template<typename DeclT>
+class MergedRedeclIterator {
+ DeclT *Start, *Canonical, *Current;
+public:
+ MergedRedeclIterator() : Current(nullptr) {}
+ MergedRedeclIterator(DeclT *Start)
+ : Start(Start), Canonical(nullptr), Current(Start) {}
+
+ DeclT *operator*() { return Current; }
+
+ MergedRedeclIterator &operator++() {
+ if (Current->isFirstDecl()) {
+ Canonical = Current;
+ Current = Current->getMostRecentDecl();
+ } else
+ Current = Current->getPreviousDecl();
+
+ // If we started in the merged portion, we'll reach our start position
+ // eventually. Otherwise, we'll never reach it, but the second declaration
+ // we reached was the canonical declaration, so stop when we see that one
+ // again.
+ if (Current == Start || Current == Canonical)
+ Current = nullptr;
+ return *this;
+ }
+
+ friend bool operator!=(const MergedRedeclIterator &A,
+ const MergedRedeclIterator &B) {
+ return A.Current != B.Current;
+ }
+};
+}
+template<typename DeclT>
+llvm::iterator_range<MergedRedeclIterator<DeclT>> merged_redecls(DeclT *D) {
+ return llvm::iterator_range<MergedRedeclIterator<DeclT>>(
+ MergedRedeclIterator<DeclT>(D),
+ MergedRedeclIterator<DeclT>());
+}
+
+template<typename DeclT, typename Fn>
+static void forAllLaterRedecls(DeclT *D, Fn F) {
+ F(D);
+
+ // Check whether we've already merged D into its redeclaration chain.
+ // MostRecent may or may not be nullptr if D has not been merged. If
+ // not, walk the merged redecl chain and see if it's there.
+ auto *MostRecent = D->getMostRecentDecl();
+ bool Found = false;
+ for (auto *Redecl = MostRecent; Redecl && !Found;
+ Redecl = Redecl->getPreviousDecl())
+ Found = (Redecl == D);
+
+ // If this declaration is merged, apply the functor to all later decls.
+ if (Found) {
+ for (auto *Redecl = MostRecent; Redecl != D;
+ Redecl = Redecl->getPreviousDecl())
+ F(Redecl);
+ }
+}
+
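// A stand-alone toy model of the traversal implemented by MergedRedeclIterator
// and forAllLaterRedecls above: walk backwards through a redeclaration chain,
// and when the first (canonical) declaration is reached, wrap around to the
// most recent one; stop on returning to the starting declaration or on seeing
// the canonical declaration a second time.
#include <iostream>

struct ToyRedecl {
  int ID;
  ToyRedecl *Prev;        // nullptr on the first (canonical) declaration
  ToyRedecl *MostRecent;  // set on every node for simplicity
  bool isFirst() const { return Prev == nullptr; }
};

// Visit every declaration in the chain exactly once, starting anywhere.
template <typename Fn> void visitMerged(ToyRedecl *Start, Fn F) {
  ToyRedecl *Canonical = nullptr;
  for (ToyRedecl *Cur = Start; Cur;) {
    F(Cur);
    if (Cur->isFirst()) {
      Canonical = Cur;
      Cur = Cur->MostRecent;
    } else {
      Cur = Cur->Prev;
    }
    if (Cur == Start || Cur == Canonical)
      break;
  }
}

int main() {
  // Chain: 3 (most recent) -> 2 -> 1 (first/canonical).
  ToyRedecl D1{1, nullptr, nullptr}, D2{2, &D1, nullptr}, D3{3, &D2, nullptr};
  D1.MostRecent = D2.MostRecent = D3.MostRecent = &D3;
  visitMerged(&D2, [](ToyRedecl *D) { std::cout << D->ID << ' '; });
  std::cout << '\n';  // prints: 2 1 3
  return 0;
}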
void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
const RecordData &Record) {
while (Idx < Record.size()) {
switch ((DeclUpdateKind)Record[Idx++]) {
case UPD_CXX_ADDED_IMPLICIT_MEMBER: {
+ // FIXME: If we also have an update record for instantiating the
+ // definition of D, we need that to happen before we get here.
Decl *MD = Reader.ReadDecl(ModuleFile, Record, Idx);
assert(MD && "couldn't read decl from update record");
+ // FIXME: We should call addHiddenDecl instead, to add the member
+ // to its DeclContext.
cast<CXXRecordDecl>(D)->addedMember(MD);
break;
}
@@ -3191,7 +3535,8 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
// Each module has its own anonymous namespace, which is disjoint from
// any other module's anonymous namespaces, so don't attach the anonymous
// namespace at all.
- if (ModuleFile.Kind != MK_Module) {
+ if (ModuleFile.Kind != MK_ImplicitModule &&
+ ModuleFile.Kind != MK_ExplicitModule) {
if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(D))
TU->setAnonymousNamespace(Anon);
else
@@ -3205,7 +3550,7 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
Reader.ReadSourceLocation(ModuleFile, Record, Idx));
break;
- case UPD_CXX_INSTANTIATED_FUNCTION_DEFINITION: {
+ case UPD_CXX_ADDED_FUNCTION_DEFINITION: {
FunctionDecl *FD = cast<FunctionDecl>(D);
if (Reader.PendingBodies[FD]) {
// FIXME: Maybe check for ODR violations.
@@ -3217,17 +3562,18 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
// Maintain AST consistency: any later redeclarations of this function
// are inline if this one is. (We might have merged another declaration
// into this one.)
- for (auto *D = FD->getMostRecentDecl(); /**/;
- D = D->getPreviousDecl()) {
- D->setImplicitlyInline();
- if (D == FD)
- break;
- }
+ forAllLaterRedecls(FD, [](FunctionDecl *FD) {
+ FD->setImplicitlyInline();
+ });
}
FD->setInnerLocStart(Reader.ReadSourceLocation(ModuleFile, Record, Idx));
if (auto *CD = dyn_cast<CXXConstructorDecl>(FD))
std::tie(CD->CtorInitializers, CD->NumCtorInitializers) =
Reader.ReadCXXCtorInitializers(ModuleFile, Record, Idx);
+ if (auto *DD = dyn_cast<CXXDestructorDecl>(FD))
+ // FIXME: Check consistency.
+ DD->setOperatorDelete(Reader.ReadDeclAs<FunctionDecl>(ModuleFile,
+ Record, Idx));
// Store the offset of the body so we can lazily load it later.
Reader.PendingBodies[FD] = GetCurrentCursorOffset();
HasPendingBody = true;
@@ -3246,6 +3592,7 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
Reader.ReadDeclContextStorage(ModuleFile, ModuleFile.DeclsCursor,
std::make_pair(LexicalOffset, 0),
ModuleFile.DeclContextInfos[RD]);
+ Reader.PendingDefinitions.insert(RD);
}
auto TSK = (TemplateSpecializationKind)Record[Idx++];
@@ -3267,7 +3614,12 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
auto *TemplArgList = TemplateArgumentList::CreateCopy(
Reader.getContext(), TemplArgs.data(), TemplArgs.size());
- Spec->setInstantiationOf(PartialSpec, TemplArgList);
+
+ // FIXME: If we already have a partial specialization set,
+ // check that it matches.
+ if (!Spec->getSpecializedTemplateOrPartial()
+ .is<ClassTemplatePartialSpecializationDecl *>())
+ Spec->setInstantiationOf(PartialSpec, TemplArgList);
}
}
@@ -3285,20 +3637,35 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
}
case UPD_CXX_RESOLVED_EXCEPTION_SPEC: {
- auto *FD = cast<FunctionDecl>(D);
- auto *FPT = FD->getType()->castAs<FunctionProtoType>();
- auto EPI = FPT->getExtProtoInfo();
+ // FIXME: This doesn't send the right notifications if there are
+ // ASTMutationListeners other than an ASTWriter.
+ FunctionProtoType::ExceptionSpecInfo ESI;
SmallVector<QualType, 8> ExceptionStorage;
- Reader.readExceptionSpec(ModuleFile, ExceptionStorage, EPI, Record, Idx);
- FD->setType(Reader.Context.getFunctionType(FPT->getReturnType(),
- FPT->getParamTypes(), EPI));
+ Reader.readExceptionSpec(ModuleFile, ExceptionStorage, ESI, Record, Idx);
+ for (auto *Redecl : merged_redecls(D)) {
+ auto *FD = cast<FunctionDecl>(Redecl);
+ auto *FPT = FD->getType()->castAs<FunctionProtoType>();
+ if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType())) {
+ // AST invariant: if any exception spec in the redecl chain is
+ // resolved, all are resolved. We don't need to go any further.
+ // FIXME: If the exception spec is resolved, check that it matches.
+ break;
+ }
+ FD->setType(Reader.Context.getFunctionType(
+ FPT->getReturnType(), FPT->getParamTypes(),
+ FPT->getExtProtoInfo().withExceptionSpec(ESI)));
+ }
break;
}
case UPD_CXX_DEDUCED_RETURN_TYPE: {
- FunctionDecl *FD = cast<FunctionDecl>(D);
- Reader.Context.adjustDeducedFunctionResultType(
- FD, Reader.readType(ModuleFile, Record, Idx));
+ // FIXME: Also do this when merging redecls.
+ QualType DeducedResultType = Reader.readType(ModuleFile, Record, Idx);
+ for (auto *Redecl : merged_redecls(D)) {
+ // FIXME: If the return type is already deduced, check that it matches.
+ FunctionDecl *FD = cast<FunctionDecl>(Redecl);
+ Reader.Context.adjustDeducedFunctionResultType(FD, DeducedResultType);
+ }
break;
}
@@ -3306,17 +3673,8 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
// FIXME: This doesn't send the right notifications if there are
// ASTMutationListeners other than an ASTWriter.
- // FIXME: We can't both pull in declarations (and thus create new pending
- // redeclaration chains) *and* walk redeclaration chains in this function.
- // We should defer the updates that require walking redecl chains.
-
// Maintain AST consistency: any later redeclarations are used too.
- for (auto *Redecl = D->getMostRecentDecl(); /**/;
- Redecl = Redecl->getPreviousDecl()) {
- Redecl->Used = true;
- if (Redecl == D)
- break;
- }
+ forAllLaterRedecls(D, [](Decl *D) { D->Used = true; });
break;
}
@@ -3327,6 +3685,10 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
case UPD_STATIC_LOCAL_NUMBER:
Reader.Context.setStaticLocalNumber(cast<VarDecl>(D), Record[Idx++]);
break;
+ case UPD_DECL_MARKED_OPENMP_THREADPRIVATE:
+ D->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
+ Reader.Context, ReadSourceRange(Record, Idx)));
+ break;
}
}
}
diff --git a/lib/Serialization/ASTReaderInternals.h b/lib/Serialization/ASTReaderInternals.h
index a63e362eb64f..d1b032b27ac2 100644
--- a/lib/Serialization/ASTReaderInternals.h
+++ b/lib/Serialization/ASTReaderInternals.h
@@ -10,8 +10,8 @@
// This file provides internal definitions used in the AST reader.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SERIALIZATION_ASTREADER_INTERNALS_H
-#define LLVM_CLANG_SERIALIZATION_ASTREADER_INTERNALS_H
+#ifndef LLVM_CLANG_LIB_SERIALIZATION_ASTREADERINTERNALS_H
+#define LLVM_CLANG_LIB_SERIALIZATION_ASTREADERINTERNALS_H
#include "clang/AST/DeclarationName.h"
#include "clang/Serialization/ASTBitCodes.h"
@@ -156,6 +156,8 @@ public:
SelectorID ID;
unsigned InstanceBits;
unsigned FactoryBits;
+ bool InstanceHasMoreThanOneDecl;
+ bool FactoryHasMoreThanOneDecl;
SmallVector<ObjCMethodDecl *, 2> Instance;
SmallVector<ObjCMethodDecl *, 2> Factory;
};
@@ -194,8 +196,8 @@ typedef llvm::OnDiskChainedHashTable<ASTSelectorLookupTrait>
///
/// The on-disk hash table contains a mapping from each header path to
/// information about that header (how many times it has been included, its
-/// controlling macro, etc.). Note that we actually hash based on the
-/// filename, and support "deep" comparisons of file names based on current
+/// controlling macro, etc.). Note that we actually hash based on the size
+/// and mtime, and support "deep" comparisons of file names based on current
/// inode numbers, so that the search can cope with non-normalized path names
/// and symlinks.
class HeaderFileInfoTrait {
@@ -211,6 +213,7 @@ public:
off_t Size;
time_t ModTime;
const char *Filename;
+ bool Imported;
};
typedef const internal_key_type &internal_key_ref;
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index 8bf17d51b3e8..4ef2e73062a5 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -135,8 +135,8 @@ void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
while (NumStmts--)
Stmts.push_back(Reader.ReadSubStmt());
S->setStmts(Reader.getContext(), Stmts.data(), Stmts.size());
- S->setLBracLoc(ReadSourceLocation(Record, Idx));
- S->setRBracLoc(ReadSourceLocation(Record, Idx));
+ S->LBraceLoc = ReadSourceLocation(Record, Idx);
+ S->RBraceLoc = ReadSourceLocation(Record, Idx);
}
void ASTStmtReader::VisitSwitchCase(SwitchCase *S) {
@@ -422,7 +422,8 @@ void ASTStmtReader::VisitExpr(Expr *E) {
void ASTStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
VisitExpr(E);
E->setLocation(ReadSourceLocation(Record, Idx));
- E->setIdentType((PredefinedExpr::IdentType)Record[Idx++]);
+ E->Type = (PredefinedExpr::IdentType)Record[Idx++];
+ E->FnName = cast_or_null<StringLiteral>(Reader.ReadSubExpr());
}
void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
@@ -432,7 +433,7 @@ void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
E->DeclRefExprBits.HasFoundDecl = Record[Idx++];
E->DeclRefExprBits.HasTemplateKWAndArgsInfo = Record[Idx++];
E->DeclRefExprBits.HadMultipleCandidates = Record[Idx++];
- E->DeclRefExprBits.RefersToEnclosingLocal = Record[Idx++];
+ E->DeclRefExprBits.RefersToEnclosingVariableOrCapture = Record[Idx++];
unsigned NumTemplateArgs = 0;
if (E->hasTemplateKWAndArgsInfo())
NumTemplateArgs = Record[Idx++];
@@ -634,7 +635,7 @@ void ASTStmtReader::VisitCastExpr(CastExpr *E) {
unsigned NumBaseSpecs = Record[Idx++];
assert(NumBaseSpecs == E->path_size());
E->setSubExpr(Reader.ReadSubExpr());
- E->setCastKind((CastExpr::CastKind)Record[Idx++]);
+ E->setCastKind((CastKind)Record[Idx++]);
CastExpr::path_iterator BaseI = E->path_begin();
while (NumBaseSpecs--) {
CXXBaseSpecifier *BaseSpec = new (Reader.getContext()) CXXBaseSpecifier;
@@ -1579,12 +1580,26 @@ void ASTStmtReader::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
E->setExtendingDecl(VD, ManglingNumber);
}
+void ASTStmtReader::VisitCXXFoldExpr(CXXFoldExpr *E) {
+ VisitExpr(E);
+ E->LParenLoc = ReadSourceLocation(Record, Idx);
+ E->EllipsisLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->SubExprs[0] = Reader.ReadSubExpr();
+ E->SubExprs[1] = Reader.ReadSubExpr();
+ E->Opcode = (BinaryOperatorKind)Record[Idx++];
+}
+
void ASTStmtReader::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
E->SourceExpr = Reader.ReadSubExpr();
E->Loc = ReadSourceLocation(Record, Idx);
}
+void ASTStmtReader::VisitTypoExpr(TypoExpr *E) {
+ llvm_unreachable("Cannot read TypoExpr nodes");
+}
+
//===----------------------------------------------------------------------===//
// Microsoft Expressions and Statements
//===----------------------------------------------------------------------===//
@@ -1715,6 +1730,21 @@ OMPClause *OMPClauseReader::readClause() {
case OMPC_mergeable:
C = new (Context) OMPMergeableClause();
break;
+ case OMPC_read:
+ C = new (Context) OMPReadClause();
+ break;
+ case OMPC_write:
+ C = new (Context) OMPWriteClause();
+ break;
+ case OMPC_update:
+ C = new (Context) OMPUpdateClause();
+ break;
+ case OMPC_capture:
+ C = new (Context) OMPCaptureClause();
+ break;
+ case OMPC_seq_cst:
+ C = new (Context) OMPSeqCstClause();
+ break;
case OMPC_private:
C = OMPPrivateClause::CreateEmpty(Context, Record[Idx++]);
break;
@@ -1809,6 +1839,16 @@ void OMPClauseReader::VisitOMPUntiedClause(OMPUntiedClause *) {}
void OMPClauseReader::VisitOMPMergeableClause(OMPMergeableClause *) {}
+void OMPClauseReader::VisitOMPReadClause(OMPReadClause *) {}
+
+void OMPClauseReader::VisitOMPWriteClause(OMPWriteClause *) {}
+
+void OMPClauseReader::VisitOMPUpdateClause(OMPUpdateClause *) {}
+
+void OMPClauseReader::VisitOMPCaptureClause(OMPCaptureClause *) {}
+
+void OMPClauseReader::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+
void OMPClauseReader::VisitOMPPrivateClause(OMPPrivateClause *C) {
C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
unsigned NumVars = C->varlist_size();
@@ -1817,6 +1857,10 @@ void OMPClauseReader::VisitOMPPrivateClause(OMPPrivateClause *C) {
for (unsigned i = 0; i != NumVars; ++i)
Vars.push_back(Reader->Reader.ReadSubExpr());
C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setPrivateCopies(Vars);
}
void OMPClauseReader::VisitOMPFirstprivateClause(OMPFirstprivateClause *C) {
@@ -1827,6 +1871,14 @@ void OMPClauseReader::VisitOMPFirstprivateClause(OMPFirstprivateClause *C) {
for (unsigned i = 0; i != NumVars; ++i)
Vars.push_back(Reader->Reader.ReadSubExpr());
C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setPrivateCopies(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setInits(Vars);
}
void OMPClauseReader::VisitOMPLastprivateClause(OMPLastprivateClause *C) {
@@ -1936,6 +1988,45 @@ void ASTStmtReader::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
E->setAssociatedStmt(Reader.ReadSubStmt());
}
+void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
+ VisitStmt(D);
+ // Two fields (NumClauses and CollapsedNum) were read in ReadStmtFromStream.
+ Idx += 2;
+ VisitOMPExecutableDirective(D);
+ D->setIterationVariable(Reader.ReadSubExpr());
+ D->setLastIteration(Reader.ReadSubExpr());
+ D->setCalcLastIteration(Reader.ReadSubExpr());
+ D->setPreCond(Reader.ReadSubExpr());
+ auto Fst = Reader.ReadSubExpr();
+ auto Snd = Reader.ReadSubExpr();
+ D->setCond(Fst, Snd);
+ D->setInit(Reader.ReadSubExpr());
+ D->setInc(Reader.ReadSubExpr());
+ if (isOpenMPWorksharingDirective(D->getDirectiveKind())) {
+ D->setIsLastIterVariable(Reader.ReadSubExpr());
+ D->setLowerBoundVariable(Reader.ReadSubExpr());
+ D->setUpperBoundVariable(Reader.ReadSubExpr());
+ D->setStrideVariable(Reader.ReadSubExpr());
+ D->setEnsureUpperBound(Reader.ReadSubExpr());
+ D->setNextLowerBound(Reader.ReadSubExpr());
+ D->setNextUpperBound(Reader.ReadSubExpr());
+ }
+ SmallVector<Expr *, 4> Sub;
+ unsigned CollapsedNum = D->getCollapsedNumber();
+ Sub.reserve(CollapsedNum);
+ for (unsigned i = 0; i < CollapsedNum; ++i)
+ Sub.push_back(Reader.ReadSubExpr());
+ D->setCounters(Sub);
+ Sub.clear();
+ for (unsigned i = 0; i < CollapsedNum; ++i)
+ Sub.push_back(Reader.ReadSubExpr());
+ D->setUpdates(Sub);
+ Sub.clear();
+ for (unsigned i = 0; i < CollapsedNum; ++i)
+ Sub.push_back(Reader.ReadSubExpr());
+ D->setFinals(Sub);
+}
+
void ASTStmtReader::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
@@ -1944,17 +2035,15 @@ void ASTStmtReader::VisitOMPParallelDirective(OMPParallelDirective *D) {
}
void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
- VisitStmt(D);
- // Two fields (NumClauses and CollapsedNum) were read in ReadStmtFromStream.
- Idx += 2;
- VisitOMPExecutableDirective(D);
+ VisitOMPLoopDirective(D);
}
void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) {
- VisitStmt(D);
- // Two fields (NumClauses and CollapsedNum) were read in ReadStmtFromStream.
- Idx += 2;
- VisitOMPExecutableDirective(D);
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPForSimdDirective(OMPForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
}
void ASTStmtReader::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
@@ -1988,10 +2077,12 @@ void ASTStmtReader::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
}
void ASTStmtReader::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
- VisitStmt(D);
- // Two fields (NumClauses and CollapsedNum) were read in ReadStmtFromStream.
- Idx += 2;
- VisitOMPExecutableDirective(D);
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPParallelForSimdDirective(
+ OMPParallelForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
}
void ASTStmtReader::VisitOMPParallelSectionsDirective(
@@ -2031,6 +2122,35 @@ void ASTStmtReader::VisitOMPFlushDirective(OMPFlushDirective *D) {
VisitOMPExecutableDirective(D);
}
+void ASTStmtReader::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPAtomicDirective(OMPAtomicDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+ D->setX(Reader.ReadSubExpr());
+ D->setV(Reader.ReadSubExpr());
+ D->setExpr(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitOMPTargetDirective(OMPTargetDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTeamsDirective(OMPTeamsDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+}
+
//===----------------------------------------------------------------------===//
// ASTReader Implementation
//===----------------------------------------------------------------------===//
@@ -2529,6 +2649,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_FOR_SIMD_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPForSimdDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
+ Empty);
+ break;
+ }
+
case STMT_OMP_SECTIONS_DIRECTIVE:
S = OMPSectionsDirective::CreateEmpty(
Context, Record[ASTStmtReader::NumStmtFields], Empty);
@@ -2559,6 +2687,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_PARALLEL_FOR_SIMD_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPParallelForSimdDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
case STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE:
S = OMPParallelSectionsDirective::CreateEmpty(
Context, Record[ASTStmtReader::NumStmtFields], Empty);
@@ -2586,6 +2722,25 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
+ case STMT_OMP_ORDERED_DIRECTIVE:
+ S = OMPOrderedDirective::CreateEmpty(Context, Empty);
+ break;
+
+ case STMT_OMP_ATOMIC_DIRECTIVE:
+ S = OMPAtomicDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_TARGET_DIRECTIVE:
+ S = OMPTargetDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_TEAMS_DIRECTIVE:
+ S = OMPTeamsDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
case EXPR_CXX_OPERATOR_CALL:
S = new (Context) CXXOperatorCallExpr(Context, Empty);
break;
@@ -2775,7 +2930,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_MATERIALIZE_TEMPORARY:
S = new (Context) MaterializeTemporaryExpr(Empty);
break;
-
+
+ case EXPR_CXX_FOLD:
+ S = new (Context) CXXFoldExpr(Empty);
+ break;
+
case EXPR_OPAQUE_VALUE:
S = new (Context) OpaqueValueExpr(Empty);
break;
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index 0f52a9fd0408..6c60d45503de 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -51,6 +51,7 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
#include <algorithm>
#include <cstdio>
#include <string.h>
@@ -83,6 +84,8 @@ namespace {
public:
/// \brief Type code that corresponds to the record generated.
TypeCode Code;
+ /// \brief Abbreviation to use for the record, if any.
+ unsigned AbbrevToUse;
ASTTypeWriter(ASTWriter &Writer, ASTWriter::RecordDataImpl &Record)
: Writer(Writer), Record(Record), Code(TYPE_EXT_QUAL) { }
@@ -190,6 +193,9 @@ void ASTTypeWriter::VisitFunctionType(const FunctionType *T) {
// FIXME: need to stabilize encoding of calling convention...
Record.push_back(C.getCC());
Record.push_back(C.getProducesResult());
+
+ if (C.getHasRegParm() || C.getRegParm() || C.getProducesResult())
+ AbbrevToUse = 0;
}
void ASTTypeWriter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
@@ -216,14 +222,21 @@ static void addExceptionSpec(ASTWriter &Writer, const FunctionProtoType *T,
void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
VisitFunctionType(T);
- Record.push_back(T->getNumParams());
- for (unsigned I = 0, N = T->getNumParams(); I != N; ++I)
- Writer.AddTypeRef(T->getParamType(I), Record);
+
Record.push_back(T->isVariadic());
Record.push_back(T->hasTrailingReturn());
Record.push_back(T->getTypeQuals());
Record.push_back(static_cast<unsigned>(T->getRefQualifier()));
addExceptionSpec(Writer, T, Record);
+
+ Record.push_back(T->getNumParams());
+ for (unsigned I = 0, N = T->getNumParams(); I != N; ++I)
+ Writer.AddTypeRef(T->getParamType(I), Record);
+
+ if (T->isVariadic() || T->hasTrailingReturn() || T->getTypeQuals() ||
+ T->getRefQualifier() || T->getExceptionSpecType() != EST_None)
+ AbbrevToUse = 0;
+
Code = TYPE_FUNCTION_PROTO;
}
@@ -649,6 +662,40 @@ void TypeLocWriter::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
Writer.AddSourceLocation(TL.getRParenLoc(), Record);
}
+void ASTWriter::WriteTypeAbbrevs() {
+ using namespace llvm;
+
+ BitCodeAbbrev *Abv;
+
+ // Abbreviation for TYPE_EXT_QUAL
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::TYPE_EXT_QUAL));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 3)); // Quals
+ TypeExtQualAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for TYPE_FUNCTION_PROTO
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::TYPE_FUNCTION_PROTO));
+ // FunctionType
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ReturnType
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // NoReturn
+ Abv->Add(BitCodeAbbrevOp(0)); // HasRegParm
+ Abv->Add(BitCodeAbbrevOp(0)); // RegParm
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // CC
+ Abv->Add(BitCodeAbbrevOp(0)); // ProducesResult
+ // FunctionProtoType
+ Abv->Add(BitCodeAbbrevOp(0)); // IsVariadic
+ Abv->Add(BitCodeAbbrevOp(0)); // HasTrailingReturn
+ Abv->Add(BitCodeAbbrevOp(0)); // TypeQuals
+ Abv->Add(BitCodeAbbrevOp(0)); // RefQualifier
+ Abv->Add(BitCodeAbbrevOp(EST_None)); // ExceptionSpec
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // NumParams
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Params
+ TypeFunctionProtoAbbrev = Stream.EmitAbbrev(Abv);
+}
+
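// A sketch of the abbreviation-selection rule that pairs with WriteTypeAbbrevs
// above: the TYPE_FUNCTION_PROTO abbreviation bakes literal zeros into the
// rarely-used fields, so VisitFunctionType/VisitFunctionProtoType must fall
// back to abbreviation 0 (an unabbreviated record) as soon as any of those
// fields is non-default. The field and function names here are illustrative
// stand-ins, not clang API.
#include <cassert>

struct ProtoFields {
  bool HasRegParm = false;
  unsigned RegParm = 0;
  bool ProducesResult = false;
  bool Variadic = false;
  bool TrailingReturn = false;
  unsigned TypeQuals = 0;
  unsigned RefQualifier = 0;      // 0 plays the role of RQ_None
  bool HasExceptionSpec = false;  // corresponds to EST != EST_None
};

unsigned chooseAbbrev(const ProtoFields &F, unsigned TypeFunctionProtoAbbrev) {
  if (F.HasRegParm || F.RegParm || F.ProducesResult || F.Variadic ||
      F.TrailingReturn || F.TypeQuals || F.RefQualifier || F.HasExceptionSpec)
    return 0;                      // record does not match the fixed layout
  return TypeFunctionProtoAbbrev;  // common case: use the compact encoding
}

int main() {
  ProtoFields Common;                       // all defaults: abbreviation fits
  ProtoFields VariadicFn; VariadicFn.Variadic = true;
  assert(chooseAbbrev(Common, /*TypeFunctionProtoAbbrev=*/7) == 7);
  assert(chooseAbbrev(VariadicFn, 7) == 0); // must fall back to no abbrev
  return 0;
}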
//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
@@ -684,6 +731,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
#define RECORD(X) EmitRecordID(X, #X, Stream, Record)
RECORD(STMT_STOP);
RECORD(STMT_NULL_PTR);
+ RECORD(STMT_REF_PTR);
RECORD(STMT_NULL);
RECORD(STMT_COMPOUND);
RECORD(STMT_CASE);
@@ -711,6 +759,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_STRING_LITERAL);
RECORD(EXPR_CHARACTER_LITERAL);
RECORD(EXPR_PAREN);
+ RECORD(EXPR_PAREN_LIST);
RECORD(EXPR_UNARY_OPERATOR);
RECORD(EXPR_SIZEOF_ALIGN_OF);
RECORD(EXPR_ARRAY_SUBSCRIPT);
@@ -752,8 +801,13 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(STMT_OBJC_AT_SYNCHRONIZED);
RECORD(STMT_OBJC_AT_THROW);
RECORD(EXPR_OBJC_BOOL_LITERAL);
+ RECORD(STMT_CXX_CATCH);
+ RECORD(STMT_CXX_TRY);
+ RECORD(STMT_CXX_FOR_RANGE);
RECORD(EXPR_CXX_OPERATOR_CALL);
+ RECORD(EXPR_CXX_MEMBER_CALL);
RECORD(EXPR_CXX_CONSTRUCT);
+ RECORD(EXPR_CXX_TEMPORARY_OBJECT);
RECORD(EXPR_CXX_STATIC_CAST);
RECORD(EXPR_CXX_DYNAMIC_CAST);
RECORD(EXPR_CXX_REINTERPRET_CAST);
@@ -765,11 +819,10 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_CXX_NULL_PTR_LITERAL);
RECORD(EXPR_CXX_TYPEID_EXPR);
RECORD(EXPR_CXX_TYPEID_TYPE);
- RECORD(EXPR_CXX_UUIDOF_EXPR);
- RECORD(EXPR_CXX_UUIDOF_TYPE);
RECORD(EXPR_CXX_THIS);
RECORD(EXPR_CXX_THROW);
RECORD(EXPR_CXX_DEFAULT_ARG);
+ RECORD(EXPR_CXX_DEFAULT_INIT);
RECORD(EXPR_CXX_BIND_TEMPORARY);
RECORD(EXPR_CXX_SCALAR_VALUE_INIT);
RECORD(EXPR_CXX_NEW);
@@ -781,12 +834,22 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_CXX_UNRESOLVED_CONSTRUCT);
RECORD(EXPR_CXX_UNRESOLVED_MEMBER);
RECORD(EXPR_CXX_UNRESOLVED_LOOKUP);
+ RECORD(EXPR_CXX_EXPRESSION_TRAIT);
RECORD(EXPR_CXX_NOEXCEPT);
RECORD(EXPR_OPAQUE_VALUE);
+ RECORD(EXPR_BINARY_CONDITIONAL_OPERATOR);
+ RECORD(EXPR_TYPE_TRAIT);
+ RECORD(EXPR_ARRAY_TYPE_TRAIT);
RECORD(EXPR_PACK_EXPANSION);
RECORD(EXPR_SIZEOF_PACK);
+ RECORD(EXPR_SUBST_NON_TYPE_TEMPLATE_PARM);
RECORD(EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK);
+ RECORD(EXPR_FUNCTION_PARM_PACK);
+ RECORD(EXPR_MATERIALIZE_TEMPORARY);
RECORD(EXPR_CUDA_KERNEL_CALL);
+ RECORD(EXPR_CXX_UUIDOF_EXPR);
+ RECORD(EXPR_CXX_UUIDOF_TYPE);
+ RECORD(EXPR_LAMBDA);
#undef RECORD
}
@@ -800,6 +863,7 @@ void ASTWriter::WriteBlockInfoBlock() {
// Control Block.
BLOCK(CONTROL_BLOCK);
RECORD(METADATA);
+ RECORD(SIGNATURE);
RECORD(MODULE_NAME);
RECORD(MODULE_MAP_FILE);
RECORD(IMPORTS);
@@ -895,15 +959,14 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(TYPE_VARIABLE_ARRAY);
RECORD(TYPE_VECTOR);
RECORD(TYPE_EXT_VECTOR);
- RECORD(TYPE_FUNCTION_PROTO);
RECORD(TYPE_FUNCTION_NO_PROTO);
+ RECORD(TYPE_FUNCTION_PROTO);
RECORD(TYPE_TYPEDEF);
RECORD(TYPE_TYPEOF_EXPR);
RECORD(TYPE_TYPEOF);
RECORD(TYPE_RECORD);
RECORD(TYPE_ENUM);
RECORD(TYPE_OBJC_INTERFACE);
- RECORD(TYPE_OBJC_OBJECT);
RECORD(TYPE_OBJC_OBJECT_POINTER);
RECORD(TYPE_DECLTYPE);
RECORD(TYPE_ELABORATED);
@@ -920,8 +983,13 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(TYPE_PACK_EXPANSION);
RECORD(TYPE_ATTRIBUTED);
RECORD(TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK);
+ RECORD(TYPE_AUTO);
+ RECORD(TYPE_UNARY_TRANSFORM);
RECORD(TYPE_ATOMIC);
+ RECORD(TYPE_DECAYED);
+ RECORD(TYPE_ADJUSTED);
RECORD(DECL_TYPEDEF);
+ RECORD(DECL_TYPEALIAS);
RECORD(DECL_ENUM);
RECORD(DECL_RECORD);
RECORD(DECL_ENUM_CONSTANT);
@@ -990,42 +1058,76 @@ void ASTWriter::WriteBlockInfoBlock() {
Stream.ExitBlock();
}
+/// \brief Prepares a path for being written to an AST file by converting it
+/// to an absolute path and removing nested './'s.
+///
+/// \return \c true if the path was changed.
+bool cleanPathForOutput(FileManager &FileMgr, SmallVectorImpl<char> &Path) {
+ bool Changed = false;
+
+ if (!llvm::sys::path::is_absolute(StringRef(Path.data(), Path.size()))) {
+ llvm::sys::fs::make_absolute(Path);
+ Changed = true;
+ }
+
+ return Changed | FileMgr.removeDotPaths(Path);
+}
+
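// A minimal stand-alone illustration of the normalization cleanPathForOutput
// performs, using std::filesystem (C++17) in place of the FileManager and
// llvm::sys helpers: relative paths are made absolute and "./" components are
// stripped so that emitted paths compare consistently across compilations.
#include <cassert>
#include <filesystem>

std::filesystem::path cleanForOutput(const std::filesystem::path &P) {
  namespace fs = std::filesystem;
  fs::path Abs = P.is_absolute() ? P : fs::absolute(P);
  // Note: lexically_normal also folds "a/../b"; the helper above only needs
  // to strip "./" components, so this is an approximation.
  return Abs.lexically_normal();
}

int main() {
  namespace fs = std::filesystem;
  assert(fs::path("/usr/include/./stdio.h").lexically_normal() ==
         fs::path("/usr/include/stdio.h"));
  return 0;
}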
/// \brief Adjusts the given filename to only write out the portion of the
/// filename that is not part of the system root directory.
///
/// \param Filename the file name to adjust.
///
-/// \param isysroot When non-NULL, the PCH file is a relocatable PCH file and
-/// the returned filename will be adjusted by this system root.
+/// \param BaseDir When non-NULL, the PCH file is a relocatable AST file and
+/// the returned filename will be adjusted by this root directory.
///
/// \returns either the original filename (if it needs no adjustment) or the
/// adjusted filename (which points into the @p Filename parameter).
static const char *
-adjustFilenameForRelocatablePCH(const char *Filename, StringRef isysroot) {
+adjustFilenameForRelocatableAST(const char *Filename, StringRef BaseDir) {
assert(Filename && "No file name to adjust?");
- if (isysroot.empty())
+ if (BaseDir.empty())
return Filename;
// Verify that the filename and the system root have the same prefix.
unsigned Pos = 0;
- for (; Filename[Pos] && Pos < isysroot.size(); ++Pos)
- if (Filename[Pos] != isysroot[Pos])
+ for (; Filename[Pos] && Pos < BaseDir.size(); ++Pos)
+ if (Filename[Pos] != BaseDir[Pos])
return Filename; // Prefixes don't match.
// We hit the end of the filename before we hit the end of the system root.
if (!Filename[Pos])
return Filename;
- // If the file name has a '/' at the current position, skip over the '/'.
- // We distinguish sysroot-based includes from absolute includes by the
- // absence of '/' at the beginning of sysroot-based includes.
- if (Filename[Pos] == '/')
+ // If there's not a path separator at the end of the base directory nor
+ // immediately after it, then this isn't within the base directory.
+ if (!llvm::sys::path::is_separator(Filename[Pos])) {
+ if (!llvm::sys::path::is_separator(BaseDir.back()))
+ return Filename;
+ } else {
+ // If the file name has a '/' at the current position, skip over the '/'.
+ // We distinguish relative paths from absolute paths by the
+ // absence of '/' at the beginning of relative paths.
+ //
+ // FIXME: This is wrong. We distinguish them by asking if the path is
+ // absolute, which isn't the same thing. And there might be multiple '/'s
+ // in a row. Use a better mechanism to indicate whether we have emitted an
+ // absolute or relative path.
++Pos;
+ }
return Filename + Pos;
}
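// A sketch of the prefix check performed by adjustFilenameForRelocatableAST
// above, on plain strings: a filename is rewritten relative to BaseDir only
// when BaseDir is a genuine path-component prefix, and the separator after
// the prefix is skipped so the stored path does not start with '/'.
#include <cassert>
#include <string>

std::string relativeToBaseDir(const std::string &Filename,
                              const std::string &BaseDir) {
  if (BaseDir.empty() || Filename.compare(0, BaseDir.size(), BaseDir) != 0)
    return Filename;               // prefixes don't match
  std::size_t Pos = BaseDir.size();
  if (Pos >= Filename.size())
    return Filename;               // the filename is the base directory itself
  if (Filename[Pos] == '/')
    ++Pos;                         // skip the separator
  else if (BaseDir.back() != '/')
    return Filename;               // "/foo/barbaz" is not under "/foo/bar"
  return Filename.substr(Pos);
}

int main() {
  assert(relativeToBaseDir("/mod/cache/a.pcm", "/mod/cache") == "a.pcm");
  assert(relativeToBaseDir("/mod/cachedir/a.pcm", "/mod/cache") ==
         "/mod/cachedir/a.pcm");
  return 0;
}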
+static ASTFileSignature getSignature() {
+ while (1) {
+ if (ASTFileSignature S = llvm::sys::Process::GetRandomNumber())
+ return S;
+ // Rely on GetRandomNumber to eventually return non-zero...
+ }
+}
+
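// A tiny sketch of the retry loop in getSignature() above, presumably so that
// zero can keep meaning "no signature": keep drawing until the source yields
// something non-zero. std::random_device stands in here for
// llvm::sys::Process::GetRandomNumber().
#include <cstdint>
#include <random>

std::uint32_t drawNonZeroSignature() {
  std::random_device Source;
  while (true)
    if (std::uint32_t S = Source())
      return S;
}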
/// \brief Write the control block.
void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
StringRef isysroot,
@@ -1050,13 +1152,20 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Record.push_back(VERSION_MINOR);
Record.push_back(CLANG_VERSION_MAJOR);
Record.push_back(CLANG_VERSION_MINOR);
+ assert((!WritingModule || isysroot.empty()) &&
+ "writing module as a relocatable PCH?");
Record.push_back(!isysroot.empty());
Record.push_back(ASTHasCompilerErrors);
Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record,
getClangFullRepositoryVersion());
- // Module name
+ // Signature
+ Record.clear();
+ Record.push_back(getSignature());
+ Stream.EmitRecord(SIGNATURE, Record);
+
if (WritingModule) {
+ // Module name
BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(MODULE_NAME));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
@@ -1066,19 +1175,46 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Stream.EmitRecordWithBlob(AbbrevCode, Record, WritingModule->Name);
}
- // Module map file
- if (WritingModule) {
+ if (WritingModule && WritingModule->Directory) {
+ // Module directory.
BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
- Abbrev->Add(BitCodeAbbrevOp(MODULE_MAP_FILE));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Filename
+ Abbrev->Add(BitCodeAbbrevOp(MODULE_DIRECTORY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Directory
unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
-
- assert(WritingModule->ModuleMap && "missing module map");
- SmallString<128> ModuleMap(WritingModule->ModuleMap->getName());
- llvm::sys::fs::make_absolute(ModuleMap);
RecordData Record;
- Record.push_back(MODULE_MAP_FILE);
- Stream.EmitRecordWithBlob(AbbrevCode, Record, ModuleMap.str());
+ Record.push_back(MODULE_DIRECTORY);
+
+ SmallString<128> BaseDir(WritingModule->Directory->getName());
+ cleanPathForOutput(Context.getSourceManager().getFileManager(), BaseDir);
+ Stream.EmitRecordWithBlob(AbbrevCode, Record, BaseDir);
+
+ // Write out all other paths relative to the base directory if possible.
+ BaseDirectory.assign(BaseDir.begin(), BaseDir.end());
+ } else if (!isysroot.empty()) {
+ // Write out paths relative to the sysroot if possible.
+ BaseDirectory = isysroot;
+ }
+
+ // Module map file
+ if (WritingModule) {
+ Record.clear();
+
+ auto &Map = PP.getHeaderSearchInfo().getModuleMap();
+
+ // Primary module map file.
+ AddPath(Map.getModuleMapFileForUniquing(WritingModule)->getName(), Record);
+
+ // Additional module map files.
+ if (auto *AdditionalModMaps =
+ Map.getAdditionalModuleMapFiles(WritingModule)) {
+ Record.push_back(AdditionalModMaps->size());
+ for (const FileEntry *F : *AdditionalModMaps)
+ AddPath(F->getName(), Record);
+ } else {
+ Record.push_back(0);
+ }
+
+ Stream.EmitRecord(MODULE_MAP_FILE, Record);
}
// Imports
@@ -1096,9 +1232,8 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
AddSourceLocation((*M)->ImportLoc, Record);
Record.push_back((*M)->File->getSize());
Record.push_back((*M)->File->getModificationTime());
- const std::string &FileName = (*M)->FileName;
- Record.push_back(FileName.size());
- Record.append(FileName.begin(), FileName.end());
+ Record.push_back((*M)->Signature);
+ AddPath((*M)->FileName, Record);
}
Stream.EmitRecord(IMPORTS, Record);
}
@@ -1110,8 +1245,9 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Record.push_back(LangOpts.Name);
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
Record.push_back(static_cast<unsigned>(LangOpts.get##Name()));
-#include "clang/Basic/LangOptions.def"
-#define SANITIZER(NAME, ID) Record.push_back(LangOpts.Sanitize.ID);
+#include "clang/Basic/LangOptions.def"
+#define SANITIZER(NAME, ID) \
+ Record.push_back(LangOpts.Sanitize.has(SanitizerKind::ID));
#include "clang/Basic/Sanitizers.def"
Record.push_back((unsigned) LangOpts.ObjCRuntime.getKind());
@@ -1245,17 +1381,10 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
unsigned FileAbbrevCode = Stream.EmitAbbrev(FileAbbrev);
- SmallString<128> MainFilePath(MainFile->getName());
-
- llvm::sys::fs::make_absolute(MainFilePath);
-
- const char *MainFileNameStr = MainFilePath.c_str();
- MainFileNameStr = adjustFilenameForRelocatablePCH(MainFileNameStr,
- isysroot);
Record.clear();
Record.push_back(ORIGINAL_FILE);
Record.push_back(SM.getMainFileID().getOpaqueValue());
- Stream.EmitRecordWithBlob(FileAbbrevCode, Record, MainFileNameStr);
+ EmitRecordWithPath(FileAbbrevCode, Record, MainFile->getName());
}
Record.clear();
@@ -1281,7 +1410,6 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
WriteInputFiles(Context.SourceMgr,
PP.getHeaderSearchInfo().getHeaderSearchOpts(),
- isysroot,
PP.getLangOpts().Modules);
Stream.ExitBlock();
}
@@ -1297,7 +1425,6 @@ namespace {
void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
HeaderSearchOptions &HSOpts,
- StringRef isysroot,
bool Modules) {
using namespace llvm;
Stream.EnterSubblock(INPUT_FILES_BLOCK_ID, 4);
@@ -1368,23 +1495,8 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
// Whether this file was overridden.
Record.push_back(Entry.BufferOverridden);
- // Turn the file name into an absolute path, if it isn't already.
- const char *Filename = Entry.File->getName();
- SmallString<128> FilePath(Filename);
-
- // Ask the file manager to fixup the relative path for us. This will
- // honor the working directory.
- SourceMgr.getFileManager().FixupRelativePath(FilePath);
-
- // FIXME: This call to make_absolute shouldn't be necessary, the
- // call to FixupRelativePath should always return an absolute path.
- llvm::sys::fs::make_absolute(FilePath);
- Filename = FilePath.c_str();
-
- Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
-
- Stream.EmitRecordWithBlob(IFAbbrevCode, Record, Filename);
- }
+ EmitRecordWithPath(IFAbbrevCode, Record, Entry.File->getName());
+ }
Stream.ExitBlock();
@@ -1494,6 +1606,9 @@ namespace {
// The hash is based only on size/time of the file, so that the reader can
// match even when symlinking or excess path elements ("foo/../", "../")
// change the form of the name. However, complete path is still the key.
+ //
+ // FIXME: Using the mtime here will cause problems for explicit module
+ // imports.
return llvm::hash_combine(key.FE->getSize(),
key.FE->getModificationTime());
}
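// A sketch of the lookup-key hashing strategy described above: only the file's
// size and modification time feed the hash, so a symlinked or non-normalized
// spelling of the same header lands in the same bucket, and the full path is
// only compared afterwards as part of the key. FileKey is an illustrative
// stand-in for HeaderFileInfoTrait's key type.
#include "llvm/ADT/Hashing.h"
#include <cstdint>

namespace sketch {
struct FileKey {
  std::uint64_t Size;
  std::int64_t ModTime;
  const char *Filename;   // not hashed, but still part of the key comparison
};

llvm::hash_code hashFileKey(const FileKey &K) {
  return llvm::hash_combine(K.Size, K.ModTime);
}
} // namespace sketch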
@@ -1574,7 +1689,7 @@ namespace {
/// \brief Write the header search block for the list of files that
///
/// \param HS The header search structure to save.
-void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS, StringRef isysroot) {
+void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
SmallVector<const FileEntry *, 16> FilesByUID;
HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
@@ -1598,17 +1713,16 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS, StringRef isysroot) {
(HFI.isModuleHeader && !HFI.isCompilingModuleHeader))
continue;
- // Turn the file name into an absolute path, if it isn't already.
+ // Massage the file path into an appropriate form.
const char *Filename = File->getName();
- Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
-
- // If we performed any translation on the file name at all, we need to
- // save this string, since the generator will refer to it later.
- if (Filename != File->getName()) {
- Filename = strdup(Filename);
+ SmallString<128> FilenameTmp(Filename);
+ if (PreparePathForOutput(FilenameTmp)) {
+ // If we performed any translation on the file name at all, we need to
+ // save this string, since the generator will refer to it later.
+ Filename = strdup(FilenameTmp.c_str());
SavedStrings.push_back(Filename);
}
-
+
HeaderFileInfoTrait::key_type key = { File, Filename };
Generator.insert(key, HFI, GeneratorTrait);
++NumHeaderSearchEntries;
@@ -1658,8 +1772,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS, StringRef isysroot) {
/// errors), we probably won't have to create file entries for any of
/// the files in the AST.
void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
- const Preprocessor &PP,
- StringRef isysroot) {
+ const Preprocessor &PP) {
RecordData Record;
// Enter the source manager block.
@@ -1808,17 +1921,10 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
LineTableInfo &LineTable = SourceMgr.getLineTable();
Record.clear();
- // Emit the file names
+ // Emit the file names.
Record.push_back(LineTable.getNumFilenames());
- for (unsigned I = 0, N = LineTable.getNumFilenames(); I != N; ++I) {
- // Emit the file name
- const char *Filename = LineTable.getFilename(I);
- Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
- unsigned FilenameLen = Filename? strlen(Filename) : 0;
- Record.push_back(FilenameLen);
- if (FilenameLen)
- Record.insert(Record.end(), Filename, Filename + FilenameLen);
- }
+ for (unsigned I = 0, N = LineTable.getNumFilenames(); I != N; ++I)
+ AddPath(LineTable.getFilename(I), Record);
// Emit the line entries
for (LineTableInfo::iterator L = LineTable.begin(), LEnd = LineTable.end();
@@ -1904,10 +2010,8 @@ static bool shouldIgnoreMacro(MacroDirective *MD, bool IsModule,
if (IsModule) {
// Re-export any imported directives.
- // FIXME: Also ensure we re-export imported #undef directives.
- if (auto *DMD = dyn_cast<DefMacroDirective>(MD))
- if (DMD->isImported())
- return false;
+ if (MD->isImported())
+ return false;
SourceLocation Loc = MD->getLocation();
if (Loc.isInvalid())
@@ -1986,16 +2090,24 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
AddSourceLocation(MD->getLocation(), Record);
Record.push_back(MD->getKind());
- if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD)) {
+ if (auto *DefMD = dyn_cast<DefMacroDirective>(MD)) {
MacroID InfoID = getMacroRef(DefMD->getInfo(), Name);
Record.push_back(InfoID);
- Record.push_back(DefMD->isImported());
+ Record.push_back(DefMD->getOwningModuleID());
Record.push_back(DefMD->isAmbiguous());
-
- } else if (VisibilityMacroDirective *
- VisMD = dyn_cast<VisibilityMacroDirective>(MD)) {
+ } else if (auto *UndefMD = dyn_cast<UndefMacroDirective>(MD)) {
+ Record.push_back(UndefMD->getOwningModuleID());
+ } else {
+ auto *VisMD = cast<VisibilityMacroDirective>(MD);
Record.push_back(VisMD->isPublic());
}
+
+ if (MD->isImported()) {
+ auto Overrides = MD->getOverriddenModules();
+ Record.push_back(Overrides.size());
+ for (auto Override : Overrides)
+ Record.push_back(Override);
+ }
}
if (Record.empty())
continue;
@@ -2271,7 +2383,7 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
}
// Enter the submodule description block.
- Stream.EnterSubblock(SUBMODULE_BLOCK_ID, NUM_ALLOWED_ABBREVS_SIZE);
+ Stream.EnterSubblock(SUBMODULE_BLOCK_ID, /*bits for abbreviations*/5);
// Write the abbreviations needed for the submodules block.
using namespace llvm;
@@ -2322,11 +2434,21 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
unsigned ExcludedHeaderAbbrev = Stream.EmitAbbrev(Abbrev);
Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_TEXTUAL_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned TextualHeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_PRIVATE_HEADER));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
unsigned PrivateHeaderAbbrev = Stream.EmitAbbrev(Abbrev);
Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_PRIVATE_TEXTUAL_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned PrivateTextualHeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_LINK_LIBRARY));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFramework
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
@@ -2398,35 +2520,34 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Stream.EmitRecordWithBlob(UmbrellaDirAbbrev, Record,
UmbrellaDir->getName());
}
-
+
// Emit the headers.
- for (unsigned I = 0, N = Mod->NormalHeaders.size(); I != N; ++I) {
- Record.clear();
- Record.push_back(SUBMODULE_HEADER);
- Stream.EmitRecordWithBlob(HeaderAbbrev, Record,
- Mod->NormalHeaders[I]->getName());
- }
- // Emit the excluded headers.
- for (unsigned I = 0, N = Mod->ExcludedHeaders.size(); I != N; ++I) {
- Record.clear();
- Record.push_back(SUBMODULE_EXCLUDED_HEADER);
- Stream.EmitRecordWithBlob(ExcludedHeaderAbbrev, Record,
- Mod->ExcludedHeaders[I]->getName());
- }
- // Emit the private headers.
- for (unsigned I = 0, N = Mod->PrivateHeaders.size(); I != N; ++I) {
+ struct {
+ unsigned RecordKind;
+ unsigned Abbrev;
+ Module::HeaderKind HeaderKind;
+ } HeaderLists[] = {
+ {SUBMODULE_HEADER, HeaderAbbrev, Module::HK_Normal},
+ {SUBMODULE_TEXTUAL_HEADER, TextualHeaderAbbrev, Module::HK_Textual},
+ {SUBMODULE_PRIVATE_HEADER, PrivateHeaderAbbrev, Module::HK_Private},
+ {SUBMODULE_PRIVATE_TEXTUAL_HEADER, PrivateTextualHeaderAbbrev,
+ Module::HK_PrivateTextual},
+ {SUBMODULE_EXCLUDED_HEADER, ExcludedHeaderAbbrev, Module::HK_Excluded}
+ };
+ for (auto &HL : HeaderLists) {
Record.clear();
- Record.push_back(SUBMODULE_PRIVATE_HEADER);
- Stream.EmitRecordWithBlob(PrivateHeaderAbbrev, Record,
- Mod->PrivateHeaders[I]->getName());
+ Record.push_back(HL.RecordKind);
+ for (auto &H : Mod->Headers[HL.HeaderKind])
+ Stream.EmitRecordWithBlob(HL.Abbrev, Record, H.NameAsWritten);
}
- ArrayRef<const FileEntry *>
- TopHeaders = Mod->getTopHeaders(PP->getFileManager());
- for (unsigned I = 0, N = TopHeaders.size(); I != N; ++I) {
+
+ // Emit the top headers.
+ {
+ auto TopHeaders = Mod->getTopHeaders(PP->getFileManager());
Record.clear();
Record.push_back(SUBMODULE_TOPHEADER);
- Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record,
- TopHeaders[I]->getName());
+ for (auto *H : TopHeaders)
+ Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record, H->getName());
}
// Emit the imports.
@@ -2434,7 +2555,7 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Record.clear();
for (unsigned I = 0, N = Mod->Imports.size(); I != N; ++I) {
unsigned ImportedID = getSubmoduleID(Mod->Imports[I]);
- assert(ImportedID && "Unknown submodule!");
+ assert(ImportedID && "Unknown submodule!");
Record.push_back(ImportedID);
}
Stream.EmitRecord(SUBMODULE_IMPORTS, Record);
@@ -2611,12 +2732,14 @@ void ASTWriter::WriteType(QualType T) {
// Emit the type's representation.
ASTTypeWriter W(*this, Record);
+ W.AbbrevToUse = 0;
if (T.hasLocalNonFastQualifiers()) {
Qualifiers Qs = T.getLocalQualifiers();
AddTypeRef(T.getLocalUnqualifiedType(), Record);
Record.push_back(Qs.getAsOpaqueValue());
W.Code = TYPE_EXT_QUAL;
+ W.AbbrevToUse = TypeExtQualAbbrev;
} else {
switch (T->getTypeClass()) {
// For all of the concrete, non-dependent types, call the
@@ -2629,7 +2752,7 @@ void ASTWriter::WriteType(QualType T) {
}
// Emit the serialized record.
- Stream.EmitRecord(W.Code, Record);
+ Stream.EmitRecord(W.Code, Record, W.AbbrevToUse);
// Flush any expressions that were written as part of this type.
FlushStmts();
@@ -2772,11 +2895,11 @@ public:
unsigned DataLen = 4 + 2 + 2; // 2 bytes for each of the method counts
for (const ObjCMethodList *Method = &Methods.Instance; Method;
Method = Method->getNext())
- if (Method->Method)
+ if (Method->getMethod())
DataLen += 4;
for (const ObjCMethodList *Method = &Methods.Factory; Method;
Method = Method->getNext())
- if (Method->Method)
+ if (Method->getMethod())
DataLen += 4;
LE.write<uint16_t>(DataLen);
return std::make_pair(KeyLen, DataLen);
@@ -2806,32 +2929,39 @@ public:
unsigned NumInstanceMethods = 0;
for (const ObjCMethodList *Method = &Methods.Instance; Method;
Method = Method->getNext())
- if (Method->Method)
+ if (Method->getMethod())
++NumInstanceMethods;
unsigned NumFactoryMethods = 0;
for (const ObjCMethodList *Method = &Methods.Factory; Method;
Method = Method->getNext())
- if (Method->Method)
+ if (Method->getMethod())
++NumFactoryMethods;
unsigned InstanceBits = Methods.Instance.getBits();
assert(InstanceBits < 4);
- unsigned NumInstanceMethodsAndBits =
- (NumInstanceMethods << 2) | InstanceBits;
+ unsigned InstanceHasMoreThanOneDeclBit =
+ Methods.Instance.hasMoreThanOneDecl();
+ unsigned FullInstanceBits = (NumInstanceMethods << 3) |
+ (InstanceHasMoreThanOneDeclBit << 2) |
+ InstanceBits;
unsigned FactoryBits = Methods.Factory.getBits();
assert(FactoryBits < 4);
- unsigned NumFactoryMethodsAndBits = (NumFactoryMethods << 2) | FactoryBits;
- LE.write<uint16_t>(NumInstanceMethodsAndBits);
- LE.write<uint16_t>(NumFactoryMethodsAndBits);
+ unsigned FactoryHasMoreThanOneDeclBit =
+ Methods.Factory.hasMoreThanOneDecl();
+ unsigned FullFactoryBits = (NumFactoryMethods << 3) |
+ (FactoryHasMoreThanOneDeclBit << 2) |
+ FactoryBits;
+ LE.write<uint16_t>(FullInstanceBits);
+ LE.write<uint16_t>(FullFactoryBits);
for (const ObjCMethodList *Method = &Methods.Instance; Method;
Method = Method->getNext())
- if (Method->Method)
- LE.write<uint32_t>(Writer.getDeclID(Method->Method));
+ if (Method->getMethod())
+ LE.write<uint32_t>(Writer.getDeclID(Method->getMethod()));
for (const ObjCMethodList *Method = &Methods.Factory; Method;
Method = Method->getNext())
- if (Method->Method)
- LE.write<uint32_t>(Writer.getDeclID(Method->Method));
+ if (Method->getMethod())
+ LE.write<uint32_t>(Writer.getDeclID(Method->getMethod()));
assert(Out.tell() - Start == DataLen && "Data length is wrong");
}
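Editor's note: the 16-bit words written just above pack three fields with plain shifts: the method count in the high bits, a one-bit "has more than one decl" flag, and the two bits returned by ObjCMethodList::getBits() at the bottom. A minimal pack/unpack sketch of that layout follows; the field names are descriptive inventions, and the count must fit in the remaining 13 bits.

// Sketch of the 16-bit packing used for FullInstanceBits/FullFactoryBits.
#include <cassert>
#include <cstdint>

struct MethodListBits {
  uint16_t NumMethods;       // must fit in 13 bits
  bool HasMoreThanOneDecl;   // 1 bit
  uint8_t DirtyBits;         // 2 bits (ObjCMethodList::getBits())
};

uint16_t pack(const MethodListBits &B) {
  assert(B.DirtyBits < 4 && "only two bits available");
  assert(B.NumMethods < (1u << 13) && "count must fit in 13 bits");
  return static_cast<uint16_t>(
      (B.NumMethods << 3) |
      (static_cast<unsigned>(B.HasMoreThanOneDecl) << 2) | B.DirtyBits);
}

MethodListBits unpack(uint16_t Word) {
  return {static_cast<uint16_t>(Word >> 3), ((Word >> 2) & 1) != 0,
          static_cast<uint8_t>(Word & 3)};
}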
@@ -2877,19 +3007,19 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
if (Chain && I->second < FirstSelectorID) {
// Selector already exists. Did it change?
bool changed = false;
- for (ObjCMethodList *M = &Data.Instance; !changed && M && M->Method;
- M = M->getNext()) {
- if (!M->Method->isFromASTFile())
+ for (ObjCMethodList *M = &Data.Instance;
+ !changed && M && M->getMethod(); M = M->getNext()) {
+ if (!M->getMethod()->isFromASTFile())
changed = true;
}
- for (ObjCMethodList *M = &Data.Factory; !changed && M && M->Method;
+ for (ObjCMethodList *M = &Data.Factory; !changed && M && M->getMethod();
M = M->getNext()) {
- if (!M->Method->isFromASTFile())
+ if (!M->getMethod()->isFromASTFile())
changed = true;
}
if (!changed)
continue;
- } else if (Data.Instance.Method || Data.Factory.Method) {
+ } else if (Data.Instance.getMethod() || Data.Factory.getMethod()) {
// A new method pool entry.
++NumTableEntries;
}
@@ -2995,115 +3125,140 @@ class ASTIdentifierTableTrait {
if (Macro || (Macro = PP.getMacroDirectiveHistory(II))) {
if (!IsModule)
return !shouldIgnoreMacro(Macro, IsModule, PP);
- SubmoduleID ModID;
- if (getFirstPublicSubmoduleMacro(Macro, ModID))
+
+ MacroState State;
+ if (getFirstPublicSubmoduleMacro(Macro, State))
return true;
}
return false;
}
- typedef llvm::SmallVectorImpl<SubmoduleID> OverriddenList;
+ enum class SubmoduleMacroState {
+ /// We've seen nothing about this macro.
+ None,
+ /// We've seen a public visibility directive.
+ Public,
+ /// We've either exported a macro for this module or found that the
+ /// module's definition of this macro is private.
+ Done
+ };
+ typedef llvm::DenseMap<SubmoduleID, SubmoduleMacroState> MacroState;
MacroDirective *
- getFirstPublicSubmoduleMacro(MacroDirective *MD, SubmoduleID &ModID) {
- ModID = 0;
- llvm::SmallVector<SubmoduleID, 1> Overridden;
- if (MacroDirective *NextMD = getPublicSubmoduleMacro(MD, ModID, Overridden))
- if (!shouldIgnoreMacro(NextMD, IsModule, PP))
- return NextMD;
+ getFirstPublicSubmoduleMacro(MacroDirective *MD, MacroState &State) {
+ if (MacroDirective *NextMD = getPublicSubmoduleMacro(MD, State))
+ return NextMD;
return nullptr;
}
MacroDirective *
- getNextPublicSubmoduleMacro(MacroDirective *MD, SubmoduleID &ModID,
- OverriddenList &Overridden) {
+ getNextPublicSubmoduleMacro(MacroDirective *MD, MacroState &State) {
if (MacroDirective *NextMD =
- getPublicSubmoduleMacro(MD->getPrevious(), ModID, Overridden))
- if (!shouldIgnoreMacro(NextMD, IsModule, PP))
- return NextMD;
+ getPublicSubmoduleMacro(MD->getPrevious(), State))
+ return NextMD;
return nullptr;
}
- /// \brief Traverses the macro directives history and returns the latest
- /// public macro definition or undefinition that is not in ModID.
+ /// \brief Traverses the macro directives history and returns the next
+ /// public macro definition or undefinition that has not been found so far.
+ ///
/// A macro that is defined in submodule A and undefined in submodule B
/// will still be considered as defined/exported from submodule A.
- /// ModID is updated to the module containing the returned directive.
- ///
- /// FIXME: This process breaks down if a module defines a macro, imports
- /// another submodule that changes the macro, then changes the
- /// macro again itself.
MacroDirective *getPublicSubmoduleMacro(MacroDirective *MD,
- SubmoduleID &ModID,
- OverriddenList &Overridden) {
- Overridden.clear();
+ MacroState &State) {
if (!MD)
return nullptr;
- SubmoduleID OrigModID = ModID;
Optional<bool> IsPublic;
for (; MD; MD = MD->getPrevious()) {
- SubmoduleID ThisModID = getSubmoduleID(MD);
- if (ThisModID == 0) {
- IsPublic = Optional<bool>();
-
- // If we have no directive location, this macro was installed when
- // finalizing the ASTReader.
- if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD))
- if (DefMD->getInfo()->getOwningModuleID())
- return MD;
- // Skip imports that only produce #undefs for now.
- // FIXME: We should still re-export them!
+ // Once we hit an ignored macro, we're done: the rest of the chain
+ // will all be ignored macros.
+ if (shouldIgnoreMacro(MD, IsModule, PP))
+ break;
+
+ // If this macro was imported, re-export it.
+ if (MD->isImported())
+ return MD;
+ SubmoduleID ModID = getSubmoduleID(MD);
+ auto &S = State[ModID];
+ assert(ModID && "found macro in no submodule");
+
+ if (S == SubmoduleMacroState::Done)
continue;
+
+ if (auto *VisMD = dyn_cast<VisibilityMacroDirective>(MD)) {
+ // The latest visibility directive for a name in a submodule affects all
+ // the directives that come before it.
+ if (S == SubmoduleMacroState::None)
+ S = VisMD->isPublic() ? SubmoduleMacroState::Public
+ : SubmoduleMacroState::Done;
+ } else {
+ S = SubmoduleMacroState::Done;
+ return MD;
}
- if (ThisModID != ModID) {
- ModID = ThisModID;
- IsPublic = Optional<bool>();
- }
+ }
+
+ return nullptr;
+ }
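Editor's note: the per-submodule state machine above is easier to see in isolation. The sketch below restates the newest-to-oldest walk with standard-library stand-ins; it omits shouldIgnoreMacro, the generator-style interface, and the assertion that every directive has a submodule, so treat it as a paraphrase rather than the actual algorithm.

// Standalone paraphrase of the walk implemented above: visit the directive
// history from newest to oldest, and per submodule remember whether its fate
// has already been decided.
#include <cstdint>
#include <map>
#include <vector>

enum class Kind { Define, Undefine, Visibility };

struct Directive {
  uint32_t Submodule = 0;
  Kind K = Kind::Define;
  bool Public = true;    // meaningful for Visibility only
  bool Imported = false;
};

enum class State { None, Public, Done };

// Returns the directives that should be exported, newest first.
std::vector<const Directive *>
collectExports(const std::vector<Directive> &History) {
  std::map<uint32_t, State> PerSubmodule;
  std::vector<const Directive *> Exports;
  for (const Directive &MD : History) {          // newest to oldest
    if (MD.Imported) {                           // re-export imports as-is
      Exports.push_back(&MD);
      continue;
    }
    State &S = PerSubmodule[MD.Submodule];
    if (S == State::Done)
      continue;
    if (MD.K == Kind::Visibility) {
      // The latest visibility directive in a submodule wins.
      if (S == State::None)
        S = MD.Public ? State::Public : State::Done;
    } else {
      S = State::Done;                           // first #define/#undef wins
      Exports.push_back(&MD);
    }
  }
  return Exports;
}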
+
+ ArrayRef<SubmoduleID>
+ getOverriddenSubmodules(MacroDirective *MD,
+ SmallVectorImpl<SubmoduleID> &ScratchSpace) {
+ assert(!isa<VisibilityMacroDirective>(MD) &&
+ "only #define and #undef can override");
+ if (MD->isImported())
+ return MD->getOverriddenModules();
+
+ ScratchSpace.clear();
+ SubmoduleID ModID = getSubmoduleID(MD);
+ for (MD = MD->getPrevious(); MD; MD = MD->getPrevious()) {
+ if (shouldIgnoreMacro(MD, IsModule, PP))
+ break;
// If this is a definition from a submodule import, that submodule's
// definition is overridden by the definition or undefinition that we
// started with.
- // FIXME: This should only apply to macros defined in OrigModID.
- // We can't do that currently, because a #include of a different submodule
- // of the same module just leaks through macros instead of providing new
- // DefMacroDirectives for them.
- if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD)) {
- // Figure out which submodule the macro was originally defined within.
- SubmoduleID SourceID = DefMD->getInfo()->getOwningModuleID();
- if (!SourceID) {
- SourceLocation DefLoc = DefMD->getInfo()->getDefinitionLoc();
- if (DefLoc == MD->getLocation())
- SourceID = ThisModID;
- else
- SourceID = Writer.inferSubmoduleIDFromLocation(DefLoc);
+ if (MD->isImported()) {
+ if (auto *DefMD = dyn_cast<DefMacroDirective>(MD)) {
+ SubmoduleID DefModuleID = DefMD->getInfo()->getOwningModuleID();
+ assert(DefModuleID && "imported macro has no owning module");
+ ScratchSpace.push_back(DefModuleID);
+ } else if (auto *UndefMD = dyn_cast<UndefMacroDirective>(MD)) {
+ // If we override a #undef, we override anything that #undef overrides.
+ // We don't need to override it, since an active #undef doesn't affect
+ // the meaning of a macro.
+ auto Overrides = UndefMD->getOverriddenModules();
+ ScratchSpace.insert(ScratchSpace.end(),
+ Overrides.begin(), Overrides.end());
}
- if (OrigModID && SourceID != OrigModID)
- Overridden.push_back(SourceID);
}
- // We are looking for a definition in a different submodule than the one
- // that we started with. If a submodule has re-definitions of the same
- // macro, only the last definition will be used as the "exported" one.
- if (ModID == OrigModID)
- continue;
-
- // The latest visibility directive for a name in a submodule affects all
- // the directives that come before it.
- if (VisibilityMacroDirective *VisMD =
- dyn_cast<VisibilityMacroDirective>(MD)) {
- if (!IsPublic.hasValue())
- IsPublic = VisMD->isPublic();
- } else if (!IsPublic.hasValue() || IsPublic.getValue()) {
- // FIXME: If we find an imported macro, we should include its list of
- // overrides in our export.
- return MD;
+ // Stop once we leave the original macro's submodule.
+ //
+ // Either this submodule #included another submodule of the same
+ // module or it just happened to be built after the other module.
+ // In the former case, we override the submodule's macro.
+ //
+ // FIXME: In the latter case, we shouldn't do so, but we can't tell
+ // these cases apart.
+ //
+ // FIXME: We can leave this submodule and re-enter it if it #includes a
+ // header within a different submodule of the same module. In such cases
+ // the overrides list will be incomplete.
+ SubmoduleID DirectiveModuleID = getSubmoduleID(MD);
+ if (DirectiveModuleID != ModID) {
+ if (DirectiveModuleID && !MD->isImported())
+ ScratchSpace.push_back(DirectiveModuleID);
+ break;
}
}
- return nullptr;
+ std::sort(ScratchSpace.begin(), ScratchSpace.end());
+ ScratchSpace.erase(std::unique(ScratchSpace.begin(), ScratchSpace.end()),
+ ScratchSpace.end());
+ return ScratchSpace;
}
SubmoduleID getSubmoduleID(MacroDirective *MD) {
@@ -3139,27 +3294,23 @@ public:
if (hadMacroDefinition(II, Macro)) {
DataLen += 4; // MacroDirectives offset.
if (IsModule) {
- SubmoduleID ModID;
- llvm::SmallVector<SubmoduleID, 4> Overridden;
- for (MacroDirective *
- MD = getFirstPublicSubmoduleMacro(Macro, ModID);
- MD; MD = getNextPublicSubmoduleMacro(MD, ModID, Overridden)) {
- // Previous macro's overrides.
- if (!Overridden.empty())
- DataLen += 4 * (1 + Overridden.size());
+ MacroState State;
+ SmallVector<SubmoduleID, 16> Scratch;
+ for (MacroDirective *MD = getFirstPublicSubmoduleMacro(Macro, State);
+ MD; MD = getNextPublicSubmoduleMacro(MD, State)) {
DataLen += 4; // MacroInfo ID or ModuleID.
+ if (unsigned NumOverrides =
+ getOverriddenSubmodules(MD, Scratch).size())
+ DataLen += 4 * (1 + NumOverrides);
}
- // Previous macro's overrides.
- if (!Overridden.empty())
- DataLen += 4 * (1 + Overridden.size());
- DataLen += 4;
+ DataLen += 4; // 0 terminator.
}
}
for (IdentifierResolver::iterator D = IdResolver.begin(II),
DEnd = IdResolver.end();
D != DEnd; ++D)
- DataLen += sizeof(DeclID);
+ DataLen += 4;
}
using namespace llvm::support;
endian::Writer<little> LE(Out);
@@ -3186,8 +3337,10 @@ public:
using namespace llvm::support;
endian::Writer<little> LE(Out);
LE.write<uint32_t>(Overridden.size() | 0x80000000U);
- for (unsigned I = 0, N = Overridden.size(); I != N; ++I)
+ for (unsigned I = 0, N = Overridden.size(); I != N; ++I) {
+ assert(Overridden[I] && "zero module ID for override");
LE.write<uint32_t>(Overridden[I]);
+ }
}
}
@@ -3219,24 +3372,28 @@ public:
LE.write<uint32_t>(Writer.getMacroDirectivesOffset(II));
if (IsModule) {
// Write the IDs of macros coming from different submodules.
- SubmoduleID ModID;
- llvm::SmallVector<SubmoduleID, 4> Overridden;
- for (MacroDirective *
- MD = getFirstPublicSubmoduleMacro(Macro, ModID);
- MD; MD = getNextPublicSubmoduleMacro(MD, ModID, Overridden)) {
- MacroID InfoID = 0;
- emitMacroOverrides(Out, Overridden);
+ MacroState State;
+ SmallVector<SubmoduleID, 16> Scratch;
+ for (MacroDirective *MD = getFirstPublicSubmoduleMacro(Macro, State);
+ MD; MD = getNextPublicSubmoduleMacro(MD, State)) {
if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD)) {
- InfoID = Writer.getMacroID(DefMD->getInfo());
+ // FIXME: If this macro directive was created by #pragma pop_macro,
+ // or if it was created implicitly by resolving conflicting macros,
+ // it may be for a different submodule from the one in the MacroInfo
+ // object. If so, we should write out its owning ModuleID.
+ MacroID InfoID = Writer.getMacroID(DefMD->getInfo());
assert(InfoID);
LE.write<uint32_t>(InfoID << 1);
} else {
- assert(isa<UndefMacroDirective>(MD));
- LE.write<uint32_t>((ModID << 1) | 1);
+ auto *UndefMD = cast<UndefMacroDirective>(MD);
+ SubmoduleID Mod = UndefMD->isImported()
+ ? UndefMD->getOwningModuleID()
+ : getSubmoduleID(UndefMD);
+ LE.write<uint32_t>((Mod << 1) | 1);
}
+ emitMacroOverrides(Out, getOverriddenSubmodules(MD, Scratch));
}
- emitMacroOverrides(Out, Overridden);
- LE.write<uint32_t>(0);
+ LE.write<uint32_t>(0xdeadbeef);
}
}
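Editor's note: taken together with emitMacroOverrides above, the per-identifier payload for a module build is a sequence of tagged 32-bit words: a #define is (MacroInfo ID << 1), a #undef is (SubmoduleID << 1) | 1, an override block starts with (count | 0x80000000), and the writer now finishes with the 0xdeadbeef word. The decoder below is an invented illustration of that shape; the real ASTReader knows the data length up front, so the sentinel check here is only a stand-in, and the sketch assumes IDs are small enough that a directive word never has its high bit set.

// Illustrative decoder for the per-identifier submodule-macro stream.
#include <cstdint>
#include <vector>

struct SubmoduleMacro {
  bool IsUndef = false;
  uint32_t ID = 0;                    // MacroInfo ID or SubmoduleID
  std::vector<uint32_t> Overrides;    // overridden submodule IDs
};

std::vector<SubmoduleMacro> decodeMacroStream(const uint32_t *Words,
                                              uint32_t Sentinel) {
  std::vector<SubmoduleMacro> Result;
  while (*Words != Sentinel) {
    SubmoduleMacro M;
    M.IsUndef = (*Words & 1) != 0;    // low bit tags #undef vs #define
    M.ID = *Words >> 1;
    ++Words;
    // An optional override block follows the directive word.
    if (*Words != Sentinel && (*Words & 0x80000000U)) {
      uint32_t N = *Words & 0x7fffffffU;
      ++Words;
      for (uint32_t I = 0; I != N; ++I)
        M.Overrides.push_back(*Words++);
    }
    Result.push_back(M);
  }
  return Result;
}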
@@ -3367,6 +3524,31 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
// DeclContext's Name Lookup Table Serialization
//===----------------------------------------------------------------------===//
+/// Determine the declaration that should be put into the name lookup table to
+/// represent the given declaration in this module. This is usually D itself,
+/// but if D was imported and merged into a local declaration, we want the most
+/// recent local declaration instead. The chosen declaration will be the most
+/// recent declaration in any module that imports this one.
+static NamedDecl *getDeclForLocalLookup(NamedDecl *D) {
+ if (!D->isFromASTFile())
+ return D;
+
+ if (Decl *Redecl = D->getPreviousDecl()) {
+ // For Redeclarable decls, a prior declaration might be local.
+ for (; Redecl; Redecl = Redecl->getPreviousDecl())
+ if (!Redecl->isFromASTFile())
+ return cast<NamedDecl>(Redecl);
+ } else if (Decl *First = D->getCanonicalDecl()) {
+ // For Mergeable decls, the first decl might be local.
+ if (!First->isFromASTFile())
+ return cast<NamedDecl>(First);
+ }
+
+ // All declarations are imported. Our most recent declaration will also be
+ // the most recent one in anyone who imports us.
+ return D;
+}
+
namespace {
// Trait used for the on-disk hash table used in the method pool.
class ASTDeclContextNameLookupTrait {
@@ -3484,7 +3666,7 @@ public:
LE.write<uint16_t>(Lookup.size());
for (DeclContext::lookup_iterator I = Lookup.begin(), E = Lookup.end();
I != E; ++I)
- LE.write<uint32_t>(Writer.GetDeclRef(*I));
+ LE.write<uint32_t>(Writer.GetDeclRef(getDeclForLocalLookup(*I)));
assert(Out.tell() - Start == DataLen && "Data length is wrong");
}
@@ -3524,13 +3706,13 @@ static void visitLocalLookupResults(const DeclContext *ConstDC,
}
void ASTWriter::AddUpdatedDeclContext(const DeclContext *DC) {
- if (UpdatedDeclContexts.insert(DC) && WritingAST) {
+ if (UpdatedDeclContexts.insert(DC).second && WritingAST) {
// Ensure we emit all the visible declarations.
visitLocalLookupResults(DC, DC->NeedToReconcileExternalVisibleStorage,
[&](DeclarationName Name,
DeclContext::lookup_const_result Result) {
for (auto *Decl : Result)
- GetDeclRef(Decl);
+ GetDeclRef(getDeclForLocalLookup(Decl));
});
}
}
@@ -3646,8 +3828,6 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
Record.push_back(BucketOffset);
Stream.EmitRecordWithBlob(DeclContextVisibleLookupAbbrev, Record,
LookupTable.str());
-
- Stream.EmitRecord(DECL_CONTEXT_VISIBLE, Record);
++NumVisibleDeclContexts;
return Offset;
}
@@ -3729,6 +3909,8 @@ void ASTWriter::WriteRedeclarations() {
FirstFromAST = Prev;
}
+ // FIXME: Do we need to do this for the first declaration from each
+ // redeclaration chain that was merged into this one?
Chain->MergedDecls[FirstFromAST].push_back(getDeclID(First));
}
@@ -3914,6 +4096,37 @@ void ASTWriter::AddString(StringRef Str, RecordDataImpl &Record) {
Record.insert(Record.end(), Str.begin(), Str.end());
}
+bool ASTWriter::PreparePathForOutput(SmallVectorImpl<char> &Path) {
+ assert(Context && "should have context when outputting path");
+
+ bool Changed =
+ cleanPathForOutput(Context->getSourceManager().getFileManager(), Path);
+
+ // Remove a prefix to make the path relative, if relevant.
+ const char *PathBegin = Path.data();
+ const char *PathPtr =
+ adjustFilenameForRelocatableAST(PathBegin, BaseDirectory);
+ if (PathPtr != PathBegin) {
+ Path.erase(Path.begin(), Path.begin() + (PathPtr - PathBegin));
+ Changed = true;
+ }
+
+ return Changed;
+}
+
+void ASTWriter::AddPath(StringRef Path, RecordDataImpl &Record) {
+ SmallString<128> FilePath(Path);
+ PreparePathForOutput(FilePath);
+ AddString(FilePath, Record);
+}
+
+void ASTWriter::EmitRecordWithPath(unsigned Abbrev, RecordDataImpl &Record,
+ StringRef Path) {
+ SmallString<128> FilePath(Path);
+ PreparePathForOutput(FilePath);
+ Stream.EmitRecordWithBlob(Abbrev, Record, FilePath);
+}
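Editor's note: AddPath and EmitRecordWithPath both funnel through PreparePathForOutput, whose purpose is to keep serialized paths relocatable. Below is a minimal sketch of the prefix-stripping idea only, assuming a plain string path; it ignores the cleanPathForOutput step and the exact separator handling of adjustFilenameForRelocatableAST, and the function name is invented.

// Simplified stand-in for the relocatable-path adjustment above: if the path
// starts with the base directory, drop that prefix so the AST file does not
// embed an absolute location.
#include <string>

std::string prepareForOutput(std::string Path, const std::string &BaseDir) {
  if (!BaseDir.empty() && Path.compare(0, BaseDir.size(), BaseDir) == 0) {
    Path.erase(0, BaseDir.size());
    if (!Path.empty() && (Path.front() == '/' || Path.front() == '\\'))
      Path.erase(0, 1);                    // drop a leftover separator
  }
  return Path;
}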
+
void ASTWriter::AddVersionTuple(const VersionTuple &Version,
RecordDataImpl &Record) {
Record.push_back(Version.getMajor());
@@ -3950,29 +4163,26 @@ void ASTWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) {
}
ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream)
- : Stream(Stream), Context(nullptr), PP(nullptr), Chain(nullptr),
- WritingModule(nullptr), WritingAST(false), DoneWritingDeclsAndTypes(false),
- ASTHasCompilerErrors(false),
- FirstDeclID(NUM_PREDEF_DECL_IDS), NextDeclID(FirstDeclID),
- FirstTypeID(NUM_PREDEF_TYPE_IDS), NextTypeID(FirstTypeID),
- FirstIdentID(NUM_PREDEF_IDENT_IDS), NextIdentID(FirstIdentID),
- FirstMacroID(NUM_PREDEF_MACRO_IDS), NextMacroID(FirstMacroID),
- FirstSubmoduleID(NUM_PREDEF_SUBMODULE_IDS),
- NextSubmoduleID(FirstSubmoduleID),
- FirstSelectorID(NUM_PREDEF_SELECTOR_IDS), NextSelectorID(FirstSelectorID),
- CollectedStmts(&StmtsToEmit),
- NumStatements(0), NumMacros(0), NumLexicalDeclContexts(0),
- NumVisibleDeclContexts(0),
- NextCXXBaseSpecifiersID(1),
- DeclParmVarAbbrev(0), DeclContextLexicalAbbrev(0),
- DeclContextVisibleLookupAbbrev(0), UpdateVisibleAbbrev(0),
- DeclRefExprAbbrev(0), CharacterLiteralAbbrev(0),
- DeclRecordAbbrev(0), IntegerLiteralAbbrev(0),
- DeclTypedefAbbrev(0),
- DeclVarAbbrev(0), DeclFieldAbbrev(0),
- DeclEnumAbbrev(0), DeclObjCIvarAbbrev(0)
-{
-}
+ : Stream(Stream), Context(nullptr), PP(nullptr), Chain(nullptr),
+ WritingModule(nullptr), WritingAST(false),
+ DoneWritingDeclsAndTypes(false), ASTHasCompilerErrors(false),
+ FirstDeclID(NUM_PREDEF_DECL_IDS), NextDeclID(FirstDeclID),
+ FirstTypeID(NUM_PREDEF_TYPE_IDS), NextTypeID(FirstTypeID),
+ FirstIdentID(NUM_PREDEF_IDENT_IDS), NextIdentID(FirstIdentID),
+ FirstMacroID(NUM_PREDEF_MACRO_IDS), NextMacroID(FirstMacroID),
+ FirstSubmoduleID(NUM_PREDEF_SUBMODULE_IDS),
+ NextSubmoduleID(FirstSubmoduleID),
+ FirstSelectorID(NUM_PREDEF_SELECTOR_IDS), NextSelectorID(FirstSelectorID),
+ CollectedStmts(&StmtsToEmit), NumStatements(0), NumMacros(0),
+ NumLexicalDeclContexts(0), NumVisibleDeclContexts(0),
+ NextCXXBaseSpecifiersID(1), TypeExtQualAbbrev(0),
+ TypeFunctionProtoAbbrev(0), DeclParmVarAbbrev(0),
+ DeclContextLexicalAbbrev(0), DeclContextVisibleLookupAbbrev(0),
+ UpdateVisibleAbbrev(0), DeclRecordAbbrev(0), DeclTypedefAbbrev(0),
+ DeclVarAbbrev(0), DeclFieldAbbrev(0), DeclEnumAbbrev(0),
+ DeclObjCIvarAbbrev(0), DeclCXXMethodAbbrev(0), DeclRefExprAbbrev(0),
+ CharacterLiteralAbbrev(0), IntegerLiteralAbbrev(0),
+ ExprImplicitCastAbbrev(0) {}
ASTWriter::~ASTWriter() {
llvm::DeleteContainerSeconds(FileDeclIDs);
@@ -4001,6 +4211,7 @@ void ASTWriter::WriteAST(Sema &SemaRef,
Context = nullptr;
PP = nullptr;
this->WritingModule = nullptr;
+ this->BaseDirectory.clear();
WritingAST = false;
}
@@ -4150,6 +4361,11 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
}
}
+ // Build a record containing all of the UnusedLocalTypedefNameCandidates.
+ RecordData UnusedLocalTypedefNameCandidates;
+ for (const TypedefNameDecl *TD : SemaRef.UnusedLocalTypedefNameCandidates)
+ AddDeclRef(TD, UnusedLocalTypedefNameCandidates);
+
// Build a record containing all of dynamic classes declarations.
RecordData DynamicClasses;
AddLazyVectorDecls(*this, SemaRef.DynamicClasses, DynamicClasses);
@@ -4316,22 +4532,36 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
SmallString<2048> Buffer;
{
llvm::raw_svector_ostream Out(Buffer);
- for (ModuleManager::ModuleConstIterator M = Chain->ModuleMgr.begin(),
- MEnd = Chain->ModuleMgr.end();
- M != MEnd; ++M) {
+ for (ModuleFile *M : Chain->ModuleMgr) {
using namespace llvm::support;
endian::Writer<little> LE(Out);
- StringRef FileName = (*M)->FileName;
+ StringRef FileName = M->FileName;
LE.write<uint16_t>(FileName.size());
Out.write(FileName.data(), FileName.size());
- LE.write<uint32_t>((*M)->SLocEntryBaseOffset);
- LE.write<uint32_t>((*M)->BaseIdentifierID);
- LE.write<uint32_t>((*M)->BaseMacroID);
- LE.write<uint32_t>((*M)->BasePreprocessedEntityID);
- LE.write<uint32_t>((*M)->BaseSubmoduleID);
- LE.write<uint32_t>((*M)->BaseSelectorID);
- LE.write<uint32_t>((*M)->BaseDeclID);
- LE.write<uint32_t>((*M)->BaseTypeIndex);
+
+ // Note: if a base ID was uint max, it would not be possible to load
+ // another module after it or have more than one entity inside it.
+ uint32_t None = std::numeric_limits<uint32_t>::max();
+
+ auto writeBaseIDOrNone = [&](uint32_t BaseID, bool ShouldWrite) {
+ assert(BaseID < std::numeric_limits<uint32_t>::max() && "base id too high");
+ if (ShouldWrite)
+ LE.write<uint32_t>(BaseID);
+ else
+ LE.write<uint32_t>(None);
+ };
+
+ // These values should be unique within a chain, since they will be read
+ // as keys into ContinuousRangeMaps.
+ writeBaseIDOrNone(M->SLocEntryBaseOffset, M->LocalNumSLocEntries);
+ writeBaseIDOrNone(M->BaseIdentifierID, M->LocalNumIdentifiers);
+ writeBaseIDOrNone(M->BaseMacroID, M->LocalNumMacros);
+ writeBaseIDOrNone(M->BasePreprocessedEntityID,
+ M->NumPreprocessedEntities);
+ writeBaseIDOrNone(M->BaseSubmoduleID, M->LocalNumSubmodules);
+ writeBaseIDOrNone(M->BaseSelectorID, M->LocalNumSelectors);
+ writeBaseIDOrNone(M->BaseDeclID, M->LocalNumDecls);
+ writeBaseIDOrNone(M->BaseTypeIndex, M->LocalNumTypes);
}
}
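Editor's note: the lambda above encodes "this module contributes no entities of this kind" as the all-ones 32-bit value rather than 0, because 0 is a legitimate base index. A standalone restatement of that convention, with hypothetical names:

// Sketch of the base-ID convention used above.
#include <cassert>
#include <cstdint>
#include <limits>
#include <vector>

const uint32_t NoBaseID = std::numeric_limits<uint32_t>::max();

void writeBaseIDOrNone(std::vector<uint32_t> &Out, uint32_t BaseID,
                       bool HasLocalEntities) {
  assert(BaseID < NoBaseID && "base id too high");
  Out.push_back(HasLocalEntities ? BaseID : NoBaseID);
}

bool hasBaseID(uint32_t Stored) { return Stored != NoBaseID; }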
Record.clear();
@@ -4344,8 +4574,9 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
// Keep writing types, declarations, and declaration update records
// until we've emitted all of them.
- Stream.EnterSubblock(DECLTYPES_BLOCK_ID, NUM_ALLOWED_ABBREVS_SIZE);
- WriteDeclsBlockAbbrevs();
+ Stream.EnterSubblock(DECLTYPES_BLOCK_ID, /*bits for abbreviations*/5);
+ WriteTypeAbbrevs();
+ WriteDeclAbbrevs();
for (DeclsToRewriteTy::iterator I = DeclsToRewrite.begin(),
E = DeclsToRewrite.end();
I != E; ++I)
@@ -4371,11 +4602,11 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
Stream.EmitRecord(DECL_UPDATE_OFFSETS, DeclUpdatesOffsetsRecord);
WriteCXXBaseSpecifiersOffsets();
WriteFileDeclIDsMap();
- WriteSourceManagerBlock(Context.getSourceManager(), PP, isysroot);
+ WriteSourceManagerBlock(Context.getSourceManager(), PP);
WriteComments();
WritePreprocessor(PP, isModule);
- WriteHeaderSearch(PP.getHeaderSearchInfo(), isysroot);
+ WriteHeaderSearch(PP.getHeaderSearchInfo());
WriteSelectors(SemaRef);
WriteReferencedSelectorsPool(SemaRef);
WriteIdentifierTable(PP, SemaRef.IdResolver, isModule);
@@ -4423,6 +4654,11 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
if (!DynamicClasses.empty())
Stream.EmitRecord(DYNAMIC_CLASSES, DynamicClasses);
+ // Write the record containing potentially unused local typedefs.
+ if (!UnusedLocalTypedefNameCandidates.empty())
+ Stream.EmitRecord(UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES,
+ UnusedLocalTypedefNameCandidates);
+
// Write the record containing pending implicit instantiations.
if (!PendingInstantiations.empty())
Stream.EmitRecord(PENDING_IMPLICIT_INSTANTIATIONS, PendingInstantiations);
@@ -4469,10 +4705,13 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
auto Cmp = [](const ModuleInfo &A, const ModuleInfo &B) {
return A.ID < B.ID;
};
+ auto Eq = [](const ModuleInfo &A, const ModuleInfo &B) {
+ return A.ID == B.ID;
+ };
// Sort and deduplicate module IDs.
std::sort(Imports.begin(), Imports.end(), Cmp);
- Imports.erase(std::unique(Imports.begin(), Imports.end(), Cmp),
+ Imports.erase(std::unique(Imports.begin(), Imports.end(), Eq),
Imports.end());
RecordData ImportedModules;
@@ -4532,17 +4771,17 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
Record.push_back(GetDeclRef(Update.getDecl()));
break;
- case UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER:
- AddSourceLocation(Update.getLoc(), Record);
- break;
-
- case UPD_CXX_INSTANTIATED_FUNCTION_DEFINITION:
+ case UPD_CXX_ADDED_FUNCTION_DEFINITION:
// An updated body is emitted last, so that the reader doesn't need
// to skip over the lazy body to reach statements for other records.
Record.pop_back();
HasUpdatedBody = true;
break;
+ case UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER:
+ AddSourceLocation(Update.getLoc(), Record);
+ break;
+
case UPD_CXX_INSTANTIATED_CLASS_DEFINITION: {
auto *RD = cast<CXXRecordDecl>(D);
AddUpdatedDeclContext(RD->getPrimaryContext());
@@ -4582,8 +4821,8 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
// Instantiation may change attributes; write them all out afresh.
Record.push_back(D->hasAttrs());
if (Record.back())
- WriteAttributes(ArrayRef<const Attr*>(D->getAttrs().begin(),
- D->getAttrs().size()), Record);
+ WriteAttributes(llvm::makeArrayRef(D->getAttrs().begin(),
+ D->getAttrs().size()), Record);
// FIXME: Ensure we don't get here for explicit instantiations.
break;
@@ -4607,15 +4846,21 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
case UPD_STATIC_LOCAL_NUMBER:
Record.push_back(Update.getNumber());
break;
+ case UPD_DECL_MARKED_OPENMP_THREADPRIVATE:
+ AddSourceRange(D->getAttr<OMPThreadPrivateDeclAttr>()->getRange(),
+ Record);
+ break;
}
}
if (HasUpdatedBody) {
const FunctionDecl *Def = cast<FunctionDecl>(D);
- Record.push_back(UPD_CXX_INSTANTIATED_FUNCTION_DEFINITION);
+ Record.push_back(UPD_CXX_ADDED_FUNCTION_DEFINITION);
Record.push_back(Def->isInlined());
AddSourceLocation(Def->getInnerLocStart(), Record);
AddFunctionDefinition(Def, Record);
+ if (auto *DD = dyn_cast<CXXDestructorDecl>(Def))
+ Record.push_back(GetDeclRef(DD->getOperatorDelete()));
}
OffsetsRecord.push_back(GetDeclRef(D));
@@ -4981,6 +5226,30 @@ void ASTWriter::AddDeclarationName(DeclarationName Name, RecordDataImpl &Record)
}
}
+unsigned ASTWriter::getAnonymousDeclarationNumber(const NamedDecl *D) {
+ assert(needsAnonymousDeclarationNumber(D) &&
+ "expected an anonymous declaration");
+
+ // Number the anonymous declarations within this context, if we've not
+ // already done so.
+ auto It = AnonymousDeclarationNumbers.find(D);
+ if (It == AnonymousDeclarationNumbers.end()) {
+ unsigned Index = 0;
+ for (Decl *LexicalD : D->getLexicalDeclContext()->decls()) {
+ auto *ND = dyn_cast<NamedDecl>(LexicalD);
+ if (!ND || !needsAnonymousDeclarationNumber(ND))
+ continue;
+ AnonymousDeclarationNumbers[ND] = Index++;
+ }
+
+ It = AnonymousDeclarationNumbers.find(D);
+ assert(It != AnonymousDeclarationNumbers.end() &&
+ "declaration not found within its lexical context");
+ }
+
+ return It->second;
+}
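Editor's note: getAnonymousDeclarationNumber numbers a whole lexical context lazily on the first query, then answers from the cache, presumably so an anonymous declaration can be identified across modules by its position rather than a name. A toy version of that caching pattern follows; the Decl and context types are stand-ins, not Clang's.

// Lazy per-context numbering of anonymous declarations.
#include <cassert>
#include <unordered_map>
#include <vector>

struct Decl {
  const std::vector<Decl *> *LexicalContext = nullptr; // decls, in order
  bool Anonymous = false;
};

class AnonymousNumbering {
  std::unordered_map<const Decl *, unsigned> Numbers;

public:
  unsigned get(const Decl *D) {
    assert(D->Anonymous && "expected an anonymous declaration");
    auto It = Numbers.find(D);
    if (It == Numbers.end()) {
      unsigned Index = 0;
      for (const Decl *Sibling : *D->LexicalContext)
        if (Sibling->Anonymous)
          Numbers[Sibling] = Index++;     // number the whole context once
      It = Numbers.find(D);
      assert(It != Numbers.end() && "decl not in its lexical context");
    }
    return It->second;
  }
};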
+
void ASTWriter::AddDeclarationNameLoc(const DeclarationNameLoc &DNLoc,
DeclarationName Name, RecordDataImpl &Record) {
switch (Name.getNameKind()) {
@@ -5068,6 +5337,10 @@ void ASTWriter::AddNestedNameSpecifier(NestedNameSpecifier *NNS,
case NestedNameSpecifier::Global:
// Don't need to write an associated value.
break;
+
+ case NestedNameSpecifier::Super:
+ AddDeclRef(NNS->getAsRecordDecl(), Record);
+ break;
}
}
}
@@ -5117,6 +5390,11 @@ void ASTWriter::AddNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
case NestedNameSpecifier::Global:
AddSourceLocation(NNS.getLocalSourceRange().getEnd(), Record);
break;
+
+ case NestedNameSpecifier::Super:
+ AddDeclRef(NNS.getNestedNameSpecifier()->getAsRecordDecl(), Record);
+ AddSourceRange(NNS.getLocalSourceRange(), Record);
+ break;
}
}
}
@@ -5186,7 +5464,7 @@ void ASTWriter::AddTemplateArgument(const TemplateArgument &Arg,
break;
case TemplateArgument::Declaration:
AddDeclRef(Arg.getAsDecl(), Record);
- Record.push_back(Arg.isDeclForReferenceParam());
+ AddTypeRef(Arg.getParamTypeForDecl(), Record);
break;
case TemplateArgument::NullPtr:
AddTypeRef(Arg.getNullPtrType(), Record);
@@ -5418,6 +5696,7 @@ void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Rec
Record.push_back(Capture.getCaptureKind());
switch (Capture.getCaptureKind()) {
case LCK_This:
+ case LCK_VLAType:
break;
case LCK_ByCopy:
case LCK_ByRef:
@@ -5521,8 +5800,6 @@ void ASTWriter::CompletedTagDefinition(const TagDecl *D) {
}
void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
- assert(!WritingAST && "Already writing the AST!");
-
// TU and namespaces are handled elsewhere.
if (isa<TranslationUnitDecl>(DC) || isa<NamespaceDecl>(DC))
return;
@@ -5531,12 +5808,12 @@ void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
return; // Not a source decl added to a DeclContext from PCH.
assert(!getDefinitiveDeclContext(DC) && "DeclContext not definitive!");
+ assert(!WritingAST && "Already writing the AST!");
AddUpdatedDeclContext(DC);
UpdatingVisibleDecls.push_back(D);
}
void ASTWriter::AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) {
- assert(!WritingAST && "Already writing the AST!");
assert(D->isImplicit());
if (!(!D->isFromASTFile() && RD->isFromASTFile()))
return; // Not a source member added to a class from PCH.
@@ -5545,17 +5822,18 @@ void ASTWriter::AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) {
// A decl coming from PCH was modified.
assert(RD->isCompleteDefinition());
+ assert(!WritingAST && "Already writing the AST!");
DeclUpdates[RD].push_back(DeclUpdate(UPD_CXX_ADDED_IMPLICIT_MEMBER, D));
}
void ASTWriter::AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
const ClassTemplateSpecializationDecl *D) {
// The specializations set is kept in the canonical template.
- assert(!WritingAST && "Already writing the AST!");
TD = TD->getCanonicalDecl();
if (!(!D->isFromASTFile() && TD->isFromASTFile()))
return; // Not a source specialization added to a template from PCH.
+ assert(!WritingAST && "Already writing the AST!");
DeclUpdates[TD].push_back(DeclUpdate(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
D));
}
@@ -5563,11 +5841,11 @@ void ASTWriter::AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
void ASTWriter::AddedCXXTemplateSpecialization(
const VarTemplateDecl *TD, const VarTemplateSpecializationDecl *D) {
// The specializations set is kept in the canonical template.
- assert(!WritingAST && "Already writing the AST!");
TD = TD->getCanonicalDecl();
if (!(!D->isFromASTFile() && TD->isFromASTFile()))
return; // Not a source specialization added to a template from PCH.
+ assert(!WritingAST && "Already writing the AST!");
DeclUpdates[TD].push_back(DeclUpdate(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
D));
}
@@ -5575,11 +5853,11 @@ void ASTWriter::AddedCXXTemplateSpecialization(
void ASTWriter::AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
const FunctionDecl *D) {
// The specializations set is kept in the canonical template.
- assert(!WritingAST && "Already writing the AST!");
TD = TD->getCanonicalDecl();
if (!(!D->isFromASTFile() && TD->isFromASTFile()))
return; // Not a source specialization added to a template from PCH.
+ assert(!WritingAST && "Already writing the AST!");
DeclUpdates[TD].push_back(DeclUpdate(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
D));
}
@@ -5607,9 +5885,8 @@ void ASTWriter::CompletedImplicitDefinition(const FunctionDecl *D) {
if (!D->isFromASTFile())
return; // Declaration not imported from PCH.
- // Implicit decl from a PCH was defined.
- // FIXME: Should implicit definition be a separate FunctionDecl?
- RewriteDecl(D);
+ // Implicit function decl from a PCH was defined.
+ DeclUpdates[D].push_back(DeclUpdate(UPD_CXX_ADDED_FUNCTION_DEFINITION));
}
void ASTWriter::FunctionDefinitionInstantiated(const FunctionDecl *D) {
@@ -5617,10 +5894,8 @@ void ASTWriter::FunctionDefinitionInstantiated(const FunctionDecl *D) {
if (!D->isFromASTFile())
return;
- // Since the actual instantiation is delayed, this really means that we need
- // to update the instantiation location.
DeclUpdates[D].push_back(
- DeclUpdate(UPD_CXX_INSTANTIATED_FUNCTION_DEFINITION));
+ DeclUpdate(UPD_CXX_ADDED_FUNCTION_DEFINITION));
}
void ASTWriter::StaticDataMemberInstantiated(const VarDecl *D) {
@@ -5668,3 +5943,11 @@ void ASTWriter::DeclarationMarkedUsed(const Decl *D) {
DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_MARKED_USED));
}
+
+void ASTWriter::DeclarationMarkedOpenMPThreadPrivate(const Decl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return;
+
+ DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_MARKED_OPENMP_THREADPRIVATE));
+}
diff --git a/lib/Serialization/ASTWriterDecl.cpp b/lib/Serialization/ASTWriterDecl.cpp
index 47ce747d5bd3..4017ec63d4f2 100644
--- a/lib/Serialization/ASTWriterDecl.cpp
+++ b/lib/Serialization/ASTWriterDecl.cpp
@@ -167,8 +167,8 @@ void ASTDeclWriter::VisitDecl(Decl *D) {
Record.push_back(D->isInvalidDecl());
Record.push_back(D->hasAttrs());
if (D->hasAttrs())
- Writer.WriteAttributes(ArrayRef<const Attr*>(D->getAttrs().begin(),
- D->getAttrs().size()), Record);
+ Writer.WriteAttributes(llvm::makeArrayRef(D->getAttrs().begin(),
+ D->getAttrs().size()), Record);
Record.push_back(D->isImplicit());
Record.push_back(D->isUsed(false));
Record.push_back(D->isReferenced());
@@ -203,6 +203,8 @@ void ASTDeclWriter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
void ASTDeclWriter::VisitNamedDecl(NamedDecl *D) {
VisitDecl(D);
Writer.AddDeclarationName(D->getDeclName(), Record);
+ if (needsAnonymousDeclarationNumber(D))
+ Record.push_back(Writer.getAnonymousDeclarationNumber(D));
}
void ASTDeclWriter::VisitTypeDecl(TypeDecl *D) {
@@ -224,13 +226,11 @@ void ASTDeclWriter::VisitTypedefDecl(TypedefDecl *D) {
VisitTypedefNameDecl(D);
if (!D->hasAttrs() &&
!D->isImplicit() &&
- !D->isUsed(false) &&
D->getFirstDecl() == D->getMostRecentDecl() &&
!D->isInvalidDecl() &&
- !D->isReferenced() &&
!D->isTopLevelDeclInObjCContainer() &&
- D->getAccess() == AS_none &&
!D->isModulePrivate() &&
+ !needsAnonymousDeclarationNumber(D) &&
D->getDeclName().getNameKind() == DeclarationName::Identifier)
AbbrevToUse = Writer.getDeclTypedefAbbrev();
@@ -239,6 +239,7 @@ void ASTDeclWriter::VisitTypedefDecl(TypedefDecl *D) {
void ASTDeclWriter::VisitTypeAliasDecl(TypeAliasDecl *D) {
VisitTypedefNameDecl(D);
+ Writer.AddDeclRef(D->getDescribedAliasTemplate(), Record);
Code = serialization::DECL_TYPEALIAS;
}
@@ -247,18 +248,26 @@ void ASTDeclWriter::VisitTagDecl(TagDecl *D) {
VisitTypeDecl(D);
Record.push_back(D->getIdentifierNamespace());
Record.push_back((unsigned)D->getTagKind()); // FIXME: stable encoding
- Record.push_back(D->isCompleteDefinition());
+ if (!isa<CXXRecordDecl>(D))
+ Record.push_back(D->isCompleteDefinition());
Record.push_back(D->isEmbeddedInDeclarator());
Record.push_back(D->isFreeStanding());
Record.push_back(D->isCompleteDefinitionRequired());
Writer.AddSourceLocation(D->getRBraceLoc(), Record);
- Record.push_back(D->hasExtInfo());
- if (D->hasExtInfo())
+
+ if (D->hasExtInfo()) {
+ Record.push_back(1);
Writer.AddQualifierInfo(*D->getExtInfo(), Record);
- else if (D->hasDeclaratorForAnonDecl())
- Writer.AddDeclRef(D->getDeclaratorForAnonDecl(), Record);
- else
- Writer.AddDeclRef(D->getTypedefNameForAnonDecl(), Record);
+ } else if (auto *TD = D->getTypedefNameForAnonDecl()) {
+ Record.push_back(2);
+ Writer.AddDeclRef(TD, Record);
+ Writer.AddIdentifierRef(TD->getDeclName().getAsIdentifierInfo(), Record);
+ } else if (auto *DD = D->getDeclaratorForAnonDecl()) {
+ Record.push_back(3);
+ Writer.AddDeclRef(DD, Record);
+ } else {
+ Record.push_back(0);
+ }
}
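Editor's note: the small integer pushed first now acts as a discriminator for what follows the common TagDecl fields. The enum and helper below are an invented reader-side restatement; only the values 0-3 and their meanings come from the code above.

// Discriminator written by VisitTagDecl for the tag's "extra info".
#include <cstdint>

enum class TagExtra : uint64_t {
  None = 0,                    // nothing follows
  QualifierInfo = 1,           // qualifier info follows
  TypedefNameForAnonDecl = 2,  // typedef decl ref + identifier follow
  DeclaratorForAnonDecl = 3    // declarator decl ref follows
};

const char *describe(TagExtra Kind) {
  switch (Kind) {
  case TagExtra::None:                   return "no extra info";
  case TagExtra::QualifierInfo:          return "qualifier info follows";
  case TagExtra::TypedefNameForAnonDecl: return "typedef decl + identifier follow";
  case TagExtra::DeclaratorForAnonDecl:  return "declarator decl follows";
  }
  return "unknown";
}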
void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
@@ -284,6 +293,8 @@ void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
!D->isImplicit() &&
!D->isUsed(false) &&
!D->hasExtInfo() &&
+ !D->getTypedefNameForAnonDecl() &&
+ !D->getDeclaratorForAnonDecl() &&
D->getFirstDecl() == D->getMostRecentDecl() &&
!D->isInvalidDecl() &&
!D->isReferenced() &&
@@ -293,6 +304,7 @@ void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
!CXXRecordDecl::classofKind(D->getKind()) &&
!D->getIntegerTypeSourceInfo() &&
!D->getMemberSpecializationInfo() &&
+ !needsAnonymousDeclarationNumber(D) &&
D->getDeclName().getNameKind() == DeclarationName::Identifier)
AbbrevToUse = Writer.getDeclEnumAbbrev();
@@ -310,6 +322,8 @@ void ASTDeclWriter::VisitRecordDecl(RecordDecl *D) {
!D->isImplicit() &&
!D->isUsed(false) &&
!D->hasExtInfo() &&
+ !D->getTypedefNameForAnonDecl() &&
+ !D->getDeclaratorForAnonDecl() &&
D->getFirstDecl() == D->getMostRecentDecl() &&
!D->isInvalidDecl() &&
!D->isReferenced() &&
@@ -317,6 +331,7 @@ void ASTDeclWriter::VisitRecordDecl(RecordDecl *D) {
D->getAccess() == AS_none &&
!D->isModulePrivate() &&
!CXXRecordDecl::classofKind(D->getKind()) &&
+ !needsAnonymousDeclarationNumber(D) &&
D->getDeclName().getNameKind() == DeclarationName::Identifier)
AbbrevToUse = Writer.getDeclRecordAbbrev();
@@ -349,7 +364,6 @@ void ASTDeclWriter::VisitDeclaratorDecl(DeclaratorDecl *D) {
void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
VisitRedeclarable(D);
VisitDeclaratorDecl(D);
-
Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
Record.push_back(D->getIdentifierNamespace());
@@ -663,12 +677,17 @@ void ASTDeclWriter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
void ASTDeclWriter::VisitFieldDecl(FieldDecl *D) {
VisitDeclaratorDecl(D);
Record.push_back(D->isMutable());
- if (D->InitializerOrBitWidth.getInt() != ICIS_NoInit ||
- D->InitializerOrBitWidth.getPointer()) {
- Record.push_back(D->InitializerOrBitWidth.getInt() + 1);
- Writer.AddStmt(D->InitializerOrBitWidth.getPointer());
- } else {
+ if (D->InitStorage.getInt() == FieldDecl::ISK_BitWidthOrNothing &&
+ D->InitStorage.getPointer() == nullptr) {
Record.push_back(0);
+ } else if (D->InitStorage.getInt() == FieldDecl::ISK_CapturedVLAType) {
+ Record.push_back(D->InitStorage.getInt() + 1);
+ Writer.AddTypeRef(
+ QualType(static_cast<Type *>(D->InitStorage.getPointer()), 0),
+ Record);
+ } else {
+ Record.push_back(D->InitStorage.getInt() + 1);
+ Writer.AddStmt(static_cast<Expr *>(D->InitStorage.getPointer()));
}
if (!D->getDeclName())
Writer.AddDeclRef(Context.getInstantiatedFromUnnamedFieldDecl(D), Record);
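Editor's note: the field's bit-width/initializer slot is likewise written as a tag: 0 means neither a bit width nor an initializer, and otherwise the stored kind plus one, with the captured-VLA kind followed by a type reference instead of an expression. The classifier below is hypothetical; the CapturedVLAKind parameter stands in for FieldDecl::ISK_CapturedVLAType, whose numeric value is not spelled out here.

// Invented helper mirroring the tag scheme above.
#include <cstdint>

enum class FieldInitPayload { Nothing, Expression, TypeRef };

FieldInitPayload classifyInitTag(uint64_t Tag, uint64_t CapturedVLAKind) {
  if (Tag == 0)
    return FieldInitPayload::Nothing;       // no bit width, no initializer
  if (Tag - 1 == CapturedVLAKind)
    return FieldInitPayload::TypeRef;       // captured VLA type follows
  return FieldInitPayload::Expression;      // bit width or in-class initializer
}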
@@ -753,6 +772,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
!D->isTopLevelDeclInObjCContainer() &&
D->getAccess() == AS_none &&
!D->isModulePrivate() &&
+ !needsAnonymousDeclarationNumber(D) &&
D->getDeclName().getNameKind() == DeclarationName::Identifier &&
!D->hasExtInfo() &&
D->getFirstDecl() == D->getMostRecentDecl() &&
@@ -930,6 +950,7 @@ void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
}
void ASTDeclWriter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ VisitRedeclarable(D);
VisitNamedDecl(D);
Writer.AddSourceLocation(D->getNamespaceLoc(), Record);
Writer.AddSourceLocation(D->getTargetNameLoc(), Record);
@@ -1027,6 +1048,17 @@ void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
// We only need to record overridden methods once for the canonical decl.
Record.push_back(0);
}
+
+ if (D->getFirstDecl() == D->getMostRecentDecl() &&
+ !D->isInvalidDecl() &&
+ !D->hasAttrs() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier &&
+ !D->hasExtInfo() &&
+ !D->hasInheritedPrototype() &&
+ D->hasWrittenPrototype())
+ AbbrevToUse = Writer.getDeclCXXMethodAbbrev();
+
Code = serialization::DECL_CXX_METHOD;
}
@@ -1449,7 +1481,7 @@ void ASTDeclWriter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
-void ASTWriter::WriteDeclsBlockAbbrevs() {
+void ASTWriter::WriteDeclAbbrevs() {
using namespace llvm;
BitCodeAbbrev *Abv;
@@ -1552,8 +1584,7 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFreeStanding
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsCompleteDefinitionRequired
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SourceLocation
- Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypedefNameAnonDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // ExtInfoKind
// EnumDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // AddTypeRef
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IntegerType
@@ -1600,8 +1631,7 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFreeStanding
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsCompleteDefinitionRequired
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SourceLocation
- Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypedefNameAnonDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // ExtInfoKind
// RecordDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // FlexibleArrayMember
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // AnonymousStructUnion
@@ -1676,10 +1706,10 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
- Abv->Add(BitCodeAbbrevOp(0)); // isUsed
- Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isReferenced
Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
- Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // C++ AccessSpecifier
Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
@@ -1738,6 +1768,63 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
DeclVarAbbrev = Stream.EmitAbbrev(Abv);
+ // Abbreviation for DECL_CXX_METHOD
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_CXX_METHOD));
+ // RedeclarableDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // CanonicalDecl
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // Invalid
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Implicit
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Used
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Referenced
+ Abv->Add(BitCodeAbbrevOp(0)); // InObjCContainer
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Access
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(DeclarationName::Identifier)); // NameKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Identifier
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerLocStart
+ Abv->Add(BitCodeAbbrevOp(0)); // HasExtInfo
+ // FunctionDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 11)); // IDNS
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // StorageClass
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Inline
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InlineSpecified
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // VirtualAsWritten
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Pure
+ Abv->Add(BitCodeAbbrevOp(0)); // HasInheritedProto
+ Abv->Add(BitCodeAbbrevOp(1)); // HasWrittenProto
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // DeletedAsWritten
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Trivial
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Defaulted
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ExplicitlyDefaulted
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ImplicitReturnZero
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Constexpr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // SkippedBody
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // LateParsed
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LocEnd
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // TemplateKind
+ // This Array slurps the rest of the record. Fortunately we want to encode
+ // (nearly) all the remaining (variable number of) fields in the same way.
+ //
+ // This is the function template information if any, then
+ // NumParams and Params[] from FunctionDecl, and
+ // NumOverriddenMethods, OverriddenMethods[] from CXXMethodDecl.
+ //
+ // Add an AbbrevOp for 'size then elements' and use it here.
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ DeclCXXMethodAbbrev = Stream.EmitAbbrev(Abv);
+
// Abbreviation for EXPR_DECL_REF
Abv = new BitCodeAbbrev();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_DECL_REF));
@@ -1755,7 +1842,8 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //GetDeclFound
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ExplicitTemplateArgs
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //HadMultipleCandidates
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //RefersToEnclosingLocal
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 1)); // RefersToEnclosingVariableOrCapture
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclRef
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
DeclRefExprAbbrev = Stream.EmitAbbrev(Abv);
@@ -1796,6 +1884,24 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // getKind
CharacterLiteralAbbrev = Stream.EmitAbbrev(Abv);
+ // Abbreviation for EXPR_IMPLICIT_CAST
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_IMPLICIT_CAST));
+ // Stmt
+ // Expr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
+ // CastExpr
+ Abv->Add(BitCodeAbbrevOp(0)); // PathSize
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 6)); // CastKind
+ // ImplicitCastExpr
+ ExprImplicitCastAbbrev = Stream.EmitAbbrev(Abv);
+
Abv = new BitCodeAbbrev();
Abv->Add(BitCodeAbbrevOp(serialization::DECL_CONTEXT_LEXICAL));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp
index a43b3527a711..e980ce783f5e 100644
--- a/lib/Serialization/ASTWriterStmt.cpp
+++ b/lib/Serialization/ASTWriterStmt.cpp
@@ -307,7 +307,7 @@ void ASTStmtWriter::VisitCapturedStmt(CapturedStmt *S) {
// Captures
for (const auto &I : S->captures()) {
- if (I.capturesThis())
+ if (I.capturesThis() || I.capturesVariableArrayType())
Writer.AddDeclRef(nullptr, Record);
else
Writer.AddDeclRef(I.getCapturedVar(), Record);
@@ -333,6 +333,7 @@ void ASTStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
VisitExpr(E);
Writer.AddSourceLocation(E->getLocation(), Record);
Record.push_back(E->getIdentType()); // FIXME: stable encoding
+ Writer.AddStmt(E->getFunctionName());
Code = serialization::EXPR_PREDEFINED;
}
@@ -343,7 +344,7 @@ void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
Record.push_back(E->getDecl() != E->getFoundDecl());
Record.push_back(E->hasTemplateKWAndArgsInfo());
Record.push_back(E->hadMultipleCandidates());
- Record.push_back(E->refersToEnclosingLocal());
+ Record.push_back(E->refersToEnclosingVariableOrCapture());
if (E->hasTemplateKWAndArgsInfo()) {
unsigned NumTemplateArgs = E->getNumTemplateArgs();
@@ -635,6 +636,10 @@ ASTStmtWriter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
void ASTStmtWriter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
VisitCastExpr(E);
+
+ if (E->path_size() == 0)
+ AbbrevToUse = Writer.getExprImplicitCastAbbrev();
+
Code = serialization::EXPR_IMPLICIT_CAST;
}
@@ -1574,6 +1579,17 @@ void ASTStmtWriter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
Code = serialization::EXPR_MATERIALIZE_TEMPORARY;
}
+void ASTStmtWriter::VisitCXXFoldExpr(CXXFoldExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->LParenLoc, Record);
+ Writer.AddSourceLocation(E->EllipsisLoc, Record);
+ Writer.AddSourceLocation(E->RParenLoc, Record);
+ Writer.AddStmt(E->SubExprs[0]);
+ Writer.AddStmt(E->SubExprs[1]);
+ Record.push_back(E->Opcode);
+ Code = serialization::EXPR_CXX_FOLD;
+}
+
void ASTStmtWriter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
Writer.AddStmt(E->getSourceExpr());
@@ -1581,6 +1597,12 @@ void ASTStmtWriter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
Code = serialization::EXPR_OPAQUE_VALUE;
}
+void ASTStmtWriter::VisitTypoExpr(TypoExpr *E) {
+ VisitExpr(E);
+ // TODO: Figure out sane writer behavior for a TypoExpr, if necessary
+ assert(false && "Cannot write TypoExpr nodes");
+}
+
//===----------------------------------------------------------------------===//
// CUDA Expressions and Statements.
//===----------------------------------------------------------------------===//
@@ -1735,18 +1757,39 @@ void OMPClauseWriter::VisitOMPUntiedClause(OMPUntiedClause *) {}
void OMPClauseWriter::VisitOMPMergeableClause(OMPMergeableClause *) {}
+void OMPClauseWriter::VisitOMPReadClause(OMPReadClause *) {}
+
+void OMPClauseWriter::VisitOMPWriteClause(OMPWriteClause *) {}
+
+void OMPClauseWriter::VisitOMPUpdateClause(OMPUpdateClause *) {}
+
+void OMPClauseWriter::VisitOMPCaptureClause(OMPCaptureClause *) {}
+
+void OMPClauseWriter::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+
void OMPClauseWriter::VisitOMPPrivateClause(OMPPrivateClause *C) {
Record.push_back(C->varlist_size());
Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
- for (auto *VE : C->varlists())
+ for (auto *VE : C->varlists()) {
+ Writer->Writer.AddStmt(VE);
+ }
+ for (auto *VE : C->private_copies()) {
Writer->Writer.AddStmt(VE);
+ }
}
void OMPClauseWriter::VisitOMPFirstprivateClause(OMPFirstprivateClause *C) {
Record.push_back(C->varlist_size());
Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
- for (auto *VE : C->varlists())
+ for (auto *VE : C->varlists()) {
+ Writer->Writer.AddStmt(VE);
+ }
+ for (auto *VE : C->private_copies()) {
+ Writer->Writer.AddStmt(VE);
+ }
+ for (auto *VE : C->inits()) {
Writer->Writer.AddStmt(VE);
+ }
}
void OMPClauseWriter::VisitOMPLastprivateClause(OMPLastprivateClause *C) {
@@ -1826,29 +1869,61 @@ void ASTStmtWriter::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
Writer.AddStmt(E->getAssociatedStmt());
}
-void ASTStmtWriter::VisitOMPParallelDirective(OMPParallelDirective *D) {
+void ASTStmtWriter::VisitOMPLoopDirective(OMPLoopDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
+ Record.push_back(D->getCollapsedNumber());
VisitOMPExecutableDirective(D);
- Code = serialization::STMT_OMP_PARALLEL_DIRECTIVE;
+ Writer.AddStmt(D->getIterationVariable());
+ Writer.AddStmt(D->getLastIteration());
+ Writer.AddStmt(D->getCalcLastIteration());
+ Writer.AddStmt(D->getPreCond());
+ Writer.AddStmt(D->getCond(/* SeparateIter */ false));
+ Writer.AddStmt(D->getCond(/* SeparateIter */ true));
+ Writer.AddStmt(D->getInit());
+ Writer.AddStmt(D->getInc());
+ if (isOpenMPWorksharingDirective(D->getDirectiveKind())) {
+ Writer.AddStmt(D->getIsLastIterVariable());
+ Writer.AddStmt(D->getLowerBoundVariable());
+ Writer.AddStmt(D->getUpperBoundVariable());
+ Writer.AddStmt(D->getStrideVariable());
+ Writer.AddStmt(D->getEnsureUpperBound());
+ Writer.AddStmt(D->getNextLowerBound());
+ Writer.AddStmt(D->getNextUpperBound());
+ }
+ for (auto I : D->counters()) {
+ Writer.AddStmt(I);
+ }
+ for (auto I : D->updates()) {
+ Writer.AddStmt(I);
+ }
+ for (auto I : D->finals()) {
+ Writer.AddStmt(I);
+ }
}
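Editor's note: VisitOMPLoopDirective now centralizes the loop-directive payload: the common fields are always written, the bound/stride block only for worksharing forms, then the counters, updates, and finals arrays. The sketch below shows that conditional layout in isolation; the names are invented, and the point is that the reader must make the same worksharing decision to parse the record.

// Conditional layout of the OpenMP loop-directive payload.
#include <vector>

using StmtRef = unsigned;                    // stand-in for a serialized Stmt ID

void writeLoopDirective(std::vector<StmtRef> &Record,
                        const std::vector<StmtRef> &Common,      // iteration var, bounds, cond, init, inc
                        const std::vector<StmtRef> &Worksharing, // lower/upper bound, stride, etc.
                        const std::vector<StmtRef> &PerLoopVars, // counters, updates, finals
                        bool IsWorksharing) {
  Record.insert(Record.end(), Common.begin(), Common.end());
  if (IsWorksharing)                         // only 'for'-style directives
    Record.insert(Record.end(), Worksharing.begin(), Worksharing.end());
  Record.insert(Record.end(), PerLoopVars.begin(), PerLoopVars.end());
}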
-void ASTStmtWriter::VisitOMPSimdDirective(OMPSimdDirective *D) {
+void ASTStmtWriter::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
- Record.push_back(D->getCollapsedNumber());
VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_PARALLEL_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPSimdDirective(OMPSimdDirective *D) {
+ VisitOMPLoopDirective(D);
Code = serialization::STMT_OMP_SIMD_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPForDirective(OMPForDirective *D) {
- VisitStmt(D);
- Record.push_back(D->getNumClauses());
- Record.push_back(D->getCollapsedNumber());
- VisitOMPExecutableDirective(D);
+ VisitOMPLoopDirective(D);
Code = serialization::STMT_OMP_FOR_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPForSimdDirective(OMPForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_FOR_SIMD_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
@@ -1883,13 +1958,16 @@ void ASTStmtWriter::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
}
void ASTStmtWriter::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
- VisitStmt(D);
- Record.push_back(D->getNumClauses());
- Record.push_back(D->getCollapsedNumber());
- VisitOMPExecutableDirective(D);
+ VisitOMPLoopDirective(D);
Code = serialization::STMT_OMP_PARALLEL_FOR_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPParallelForSimdDirective(
+ OMPParallelForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_PARALLEL_FOR_SIMD_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
VisitStmt(D);
@@ -1905,6 +1983,23 @@ void ASTStmtWriter::VisitOMPTaskDirective(OMPTaskDirective *D) {
Code = serialization::STMT_OMP_TASK_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPAtomicDirective(OMPAtomicDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Writer.AddStmt(D->getX());
+ Writer.AddStmt(D->getV());
+ Writer.AddStmt(D->getExpr());
+ Code = serialization::STMT_OMP_ATOMIC_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTargetDirective(OMPTargetDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_TARGET_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPTaskyieldDirective(OMPTaskyieldDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
@@ -1930,6 +2025,19 @@ void ASTStmtWriter::VisitOMPFlushDirective(OMPFlushDirective *D) {
Code = serialization::STMT_OMP_FLUSH_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_ORDERED_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTeamsDirective(OMPTeamsDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_TEAMS_DIRECTIVE;
+}
+
//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
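[Illustrative note, not part of the patch] The statement-writer hunks above hoist the shared loop-directive fields into a common VisitOMPLoopDirective and add writer entries for the new 'for simd', 'parallel for simd', 'atomic', 'target', 'ordered' and 'teams' directives. As a rough sketch, source such as the following (requires -fopenmp; function and variable names are made up) produces the AST nodes these visitors serialize when the translation unit is written out as a PCH or module file:

    // The pragmas below map onto OMPParallelForSimdDirective,
    // OMPForSimdDirective and OMPAtomicDirective nodes, all of which now go
    // through the writer paths added above.
    void saxpy(int n, float a, const float *x, float *y, int &hits) {
    #pragma omp parallel for simd
      for (int i = 0; i < n; ++i)
        y[i] = a * x[i] + y[i];

    #pragma omp for simd
      for (int i = 0; i < n; ++i)
        y[i] += 1.0f;

    #pragma omp atomic
      ++hits;
    }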
diff --git a/lib/Serialization/GlobalModuleIndex.cpp b/lib/Serialization/GlobalModuleIndex.cpp
index 985812232505..479138804547 100644
--- a/lib/Serialization/GlobalModuleIndex.cpp
+++ b/lib/Serialization/GlobalModuleIndex.cpp
@@ -122,11 +122,10 @@ typedef llvm::OnDiskIterableChainedHashTable<IdentifierIndexReaderTrait>
}
-GlobalModuleIndex::GlobalModuleIndex(llvm::MemoryBuffer *Buffer,
+GlobalModuleIndex::GlobalModuleIndex(std::unique_ptr<llvm::MemoryBuffer> Buffer,
llvm::BitstreamCursor Cursor)
- : Buffer(Buffer), IdentifierIndex(),
- NumIdentifierLookups(), NumIdentifierLookupHits()
-{
+ : Buffer(std::move(Buffer)), IdentifierIndex(), NumIdentifierLookups(),
+ NumIdentifierLookupHits() {
// Read the global index.
bool InGlobalIndexBlock = false;
bool Done = false;
@@ -260,7 +259,7 @@ GlobalModuleIndex::readIndex(StringRef Path) {
return std::make_pair(nullptr, EC_IOError);
}
- return std::make_pair(new GlobalModuleIndex(Buffer.release(), Cursor),
+ return std::make_pair(new GlobalModuleIndex(std::move(Buffer), Cursor),
EC_None);
}
@@ -494,19 +493,17 @@ namespace {
bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Open the module file.
- std::unique_ptr<llvm::MemoryBuffer> Buffer;
- std::string ErrorStr;
- Buffer.reset(FileMgr.getBufferForFile(File, &ErrorStr, /*isVolatile=*/true));
+
+ auto Buffer = FileMgr.getBufferForFile(File, /*isVolatile=*/true);
if (!Buffer) {
return true;
}
// Initialize the input stream
llvm::BitstreamReader InStreamFile;
- llvm::BitstreamCursor InStream;
- InStreamFile.init((const unsigned char *)Buffer->getBufferStart(),
- (const unsigned char *)Buffer->getBufferEnd());
- InStream.init(InStreamFile);
+ InStreamFile.init((const unsigned char *)(*Buffer)->getBufferStart(),
+ (const unsigned char *)(*Buffer)->getBufferEnd());
+ llvm::BitstreamCursor InStream(InStreamFile);
// Sniff for the signature.
if (InStream.Read(8) != 'C' ||
@@ -591,6 +588,10 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
off_t StoredSize = (off_t)Record[Idx++];
time_t StoredModTime = (time_t)Record[Idx++];
+ // Skip the stored signature.
+ // FIXME: we could read the signature out of the import and validate it.
+ Idx++;
+
// Retrieve the imported file name.
unsigned Length = Record[Idx++];
SmallString<128> ImportedFile(Record.begin() + Idx,
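[Illustrative note, not part of the patch] The GlobalModuleIndex hunks above replace a raw llvm::MemoryBuffer* with a std::unique_ptr taken by value and moved into the member. A minimal self-contained sketch of that ownership pattern (Blob, Index and makeIndex are invented for illustration):

    #include <memory>
    #include <utility>

    struct Blob {};                       // stands in for llvm::MemoryBuffer

    class Index {
      std::unique_ptr<Blob> Buffer;
    public:
      // Taking the unique_ptr by value and moving it in replaces the old
      // "pass Buffer.release() to the constructor" style of handing over
      // ownership.
      explicit Index(std::unique_ptr<Blob> Buffer) : Buffer(std::move(Buffer)) {}
    };

    Index makeIndex() {
      std::unique_ptr<Blob> Buf(new Blob());
      return Index(std::move(Buf));       // caller transfers ownership explicitly
    }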
diff --git a/lib/Serialization/Module.cpp b/lib/Serialization/Module.cpp
index 6f2a3c220430..6c48a41187c2 100644
--- a/lib/Serialization/Module.cpp
+++ b/lib/Serialization/Module.cpp
@@ -21,7 +21,7 @@ using namespace serialization;
using namespace reader;
ModuleFile::ModuleFile(ModuleKind Kind, unsigned Generation)
- : Kind(Kind), File(nullptr), DirectlyImported(false),
+ : Kind(Kind), File(nullptr), Signature(0), DirectlyImported(false),
Generation(Generation), SizeInBits(0),
LocalNumSLocEntries(0), SLocEntryBaseID(0),
SLocEntryBaseOffset(0), SLocEntryOffsets(nullptr),
diff --git a/lib/Serialization/ModuleManager.cpp b/lib/Serialization/ModuleManager.cpp
index 2c10c119a07b..ac98ca0b8720 100644
--- a/lib/Serialization/ModuleManager.cpp
+++ b/lib/Serialization/ModuleManager.cpp
@@ -45,10 +45,11 @@ ModuleFile *ModuleManager::lookup(const FileEntry *File) {
return Known->second;
}
-llvm::MemoryBuffer *ModuleManager::lookupBuffer(StringRef Name) {
+std::unique_ptr<llvm::MemoryBuffer>
+ModuleManager::lookupBuffer(StringRef Name) {
const FileEntry *Entry = FileMgr.getFile(Name, /*openFile=*/false,
/*cacheFailure=*/false);
- return InMemoryBuffers[Entry];
+ return std::move(InMemoryBuffers[Entry]);
}
ModuleManager::AddModuleResult
@@ -56,6 +57,9 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
SourceLocation ImportLoc, ModuleFile *ImportedBy,
unsigned Generation,
off_t ExpectedSize, time_t ExpectedModTime,
+ ASTFileSignature ExpectedSignature,
+ std::function<ASTFileSignature(llvm::BitstreamReader &)>
+ ReadSignature,
ModuleFile *&Module,
std::string &ErrorStr) {
Module = nullptr;
@@ -63,6 +67,13 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
// Look for the file entry. This only fails if the expected size or
// modification time differ.
const FileEntry *Entry;
+ if (Type == MK_ExplicitModule) {
+ // If we're not expecting to pull this file out of the module cache, it
+ // might have a different mtime due to being moved across filesystems in
+ // a distributed build. The size must still match, though. (As must the
+ // contents, but we can't check that.)
+ ExpectedModTime = 0;
+ }
if (lookupModuleFile(FileName, ExpectedSize, ExpectedModTime, Entry)) {
ErrorStr = "module file out of date";
return OutOfDate;
@@ -88,7 +99,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
ModuleEntry = New;
New->InputFilesValidationTimestamp = 0;
- if (New->Kind == MK_Module) {
+ if (New->Kind == MK_ImplicitModule) {
std::string TimestampFilename = New->getTimestampFilename();
vfs::Status Status;
// A cached stat value would be fine as well.
@@ -98,39 +109,59 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
}
// Load the contents of the module
- if (llvm::MemoryBuffer *Buffer = lookupBuffer(FileName)) {
+ if (std::unique_ptr<llvm::MemoryBuffer> Buffer = lookupBuffer(FileName)) {
// The buffer was already provided for us.
- assert(Buffer && "Passed null buffer");
- New->Buffer.reset(Buffer);
+ New->Buffer = std::move(Buffer);
} else {
// Open the AST file.
- std::error_code ec;
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buf(
+ (std::error_code()));
if (FileName == "-") {
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buf =
- llvm::MemoryBuffer::getSTDIN();
- ec = Buf.getError();
- if (ec)
- ErrorStr = ec.message();
- else
- New->Buffer = std::move(Buf.get());
+ Buf = llvm::MemoryBuffer::getSTDIN();
} else {
// Leave the FileEntry open so if it gets read again by another
// ModuleManager it must be the same underlying file.
// FIXME: Because FileManager::getFile() doesn't guarantee that it will
// give us an open file, this may not be 100% reliable.
- New->Buffer.reset(FileMgr.getBufferForFile(New->File, &ErrorStr,
- /*IsVolatile*/false,
- /*ShouldClose*/false));
+ Buf = FileMgr.getBufferForFile(New->File,
+ /*IsVolatile=*/false,
+ /*ShouldClose=*/false);
}
-
- if (!New->Buffer)
+
+ if (!Buf) {
+ ErrorStr = Buf.getError().message();
return Missing;
+ }
+
+ New->Buffer = std::move(*Buf);
}
// Initialize the stream
New->StreamFile.init((const unsigned char *)New->Buffer->getBufferStart(),
(const unsigned char *)New->Buffer->getBufferEnd());
}
+
+ if (ExpectedSignature) {
+ if (NewModule)
+ ModuleEntry->Signature = ReadSignature(ModuleEntry->StreamFile);
+ else
+ assert(ModuleEntry->Signature == ReadSignature(ModuleEntry->StreamFile));
+
+ if (ModuleEntry->Signature != ExpectedSignature) {
+ ErrorStr = ModuleEntry->Signature ? "signature mismatch"
+ : "could not read module signature";
+
+ if (NewModule) {
+ // Remove the module file immediately, since removeModules might try to
+ // invalidate the file cache for Entry, and that is not safe if this
+ // module is *itself* up to date, but has an out-of-date importer.
+ Modules.erase(Entry);
+ Chain.pop_back();
+ delete ModuleEntry;
+ }
+ return OutOfDate;
+ }
+ }
if (ImportedBy) {
ModuleEntry->ImportedBy.insert(ImportedBy);
@@ -187,12 +218,13 @@ void ModuleManager::removeModules(
Chain.erase(first, last);
}
-void ModuleManager::addInMemoryBuffer(StringRef FileName,
- llvm::MemoryBuffer *Buffer) {
-
- const FileEntry *Entry = FileMgr.getVirtualFile(FileName,
- Buffer->getBufferSize(), 0);
- InMemoryBuffers[Entry] = Buffer;
+void
+ModuleManager::addInMemoryBuffer(StringRef FileName,
+ std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+
+ const FileEntry *Entry =
+ FileMgr.getVirtualFile(FileName, Buffer->getBufferSize(), 0);
+ InMemoryBuffers[Entry] = std::move(Buffer);
}
ModuleManager::VisitState *ModuleManager::allocateVisitState() {
@@ -249,7 +281,7 @@ ModuleManager::~ModuleManager() {
void
ModuleManager::visit(bool (*Visitor)(ModuleFile &M, void *UserData),
void *UserData,
- llvm::SmallPtrSet<ModuleFile *, 4> *ModuleFilesHit) {
+ llvm::SmallPtrSetImpl<ModuleFile *> *ModuleFilesHit) {
// If the visitation order vector is the wrong size, recompute the order.
if (VisitOrder.size() != Chain.size()) {
unsigned N = size();
diff --git a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
index 2b314a3b749a..048418ef62db 100644
--- a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
+++ b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SA_LIB_CHECKERS_ALLOC_DIAGS_H
-#define LLVM_CLANG_SA_LIB_CHECKERS_ALLOC_DIAGS_H
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ALLOCATIONDIAGNOSTICS_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ALLOCATIONDIAGNOSTICS_H
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 20360efccfe4..e462e2b2f15e 100644
--- a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -218,17 +218,6 @@ void RegionRawOffsetV2::dumpToStream(raw_ostream &os) const {
os << "raw_offset_v2{" << getRegion() << ',' << getByteOffset() << '}';
}
-// FIXME: Merge with the implementation of the same method in Store.cpp
-static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *D = RT->getDecl();
- if (!D->getDefinition())
- return false;
- }
-
- return true;
-}
-
// Lazily computes a value to be used by 'computeOffset'. If 'val'
// is unknown or undefined, we lazily substitute '0'. Otherwise,
@@ -288,7 +277,7 @@ RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
QualType elemType = elemReg->getElementType();
// If the element is an incomplete type, go no further.
ASTContext &astContext = svalBuilder.getContext();
- if (!IsCompleteType(astContext, elemType))
+ if (elemType->isIncompleteType())
return RegionRawOffsetV2();
// Update the offset.
diff --git a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index a87104984883..104a81eac4c7 100644
--- a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -42,8 +42,10 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
return false;
case Builtin::BI__builtin_expect:
+ case Builtin::BI__builtin_assume_aligned:
case Builtin::BI__builtin_addressof: {
- // For __builtin_expect, just return the value of the subexpression.
+ // For __builtin_expect and __builtin_assume_aligned, just return the value
+ // of the subexpression.
// __builtin_addressof is going from a reference to a pointer, but those
// are represented the same way in the analyzer.
assert (CE->arg_begin() != CE->arg_end());
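[Illustrative note, not part of the patch] The hunk above makes the analyzer evaluate __builtin_assume_aligned the same way as __builtin_expect, i.e. as the identity on its first argument. A small sketch of the effect:

    // The analyzer now binds the call to its first argument, so 'aligned' is
    // modeled as the same region as 'buf' and constraints on one apply to the
    // other.
    void touch(int *buf) {
      int *aligned = (int *)__builtin_assume_aligned(buf, 64);
      aligned[0] = 1;
    }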
diff --git a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 0693bd6fd94d..e91a7e16802e 100644
--- a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -969,8 +969,13 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
// Get the length to copy.
if (Optional<NonLoc> lenValNonLoc = sizeVal.getAs<NonLoc>()) {
// Get the byte after the last byte copied.
+ SValBuilder &SvalBuilder = C.getSValBuilder();
+ ASTContext &Ctx = SvalBuilder.getContext();
+ QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
+ loc::MemRegionVal DestRegCharVal = SvalBuilder.evalCast(destRegVal,
+ CharPtrTy, Dest->getType()).castAs<loc::MemRegionVal>();
SVal lastElement = C.getSValBuilder().evalBinOpLN(state, BO_Add,
- destRegVal,
+ DestRegCharVal,
*lenValNonLoc,
Dest->getType());
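[Illustrative note, not part of the patch] The CStringChecker hunk above casts the destination pointer to char* before adding the copy length, since the size argument of the mem* functions counts bytes while pointer arithmetic on the destination's own type would step in element-sized units. A small example of the call pattern this affects (assumes the usual <string.h> memcpy):

    #include <string.h>

    // The end-of-copy address is now modeled as (char *)dst + nbytes rather
    // than as dst advanced by nbytes int-sized elements.
    void copy_ints(int *dst, const int *src, size_t nbytes) {
      memcpy(dst, src, nbytes);
    }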
diff --git a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index d186144b9ddd..339af8f03324 100644
--- a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -28,27 +28,6 @@
using namespace clang;
using namespace ento;
-static bool scan_dealloc(Stmt *S, Selector Dealloc) {
-
- if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
- if (ME->getSelector() == Dealloc) {
- switch (ME->getReceiverKind()) {
- case ObjCMessageExpr::Instance: return false;
- case ObjCMessageExpr::SuperInstance: return true;
- case ObjCMessageExpr::Class: break;
- case ObjCMessageExpr::SuperClass: break;
- }
- }
-
- // Recurse to children.
-
- for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
- if (*I && scan_dealloc(*I, Dealloc))
- return true;
-
- return false;
-}
-
static bool scan_ivar_release(Stmt *S, ObjCIvarDecl *ID,
const ObjCPropertyDecl *PD,
Selector Release,
@@ -181,24 +160,6 @@ static void checkObjCDealloc(const CheckerBase *Checker,
return;
}
- // dealloc found. Scan for missing [super dealloc].
- if (MD->getBody() && !scan_dealloc(MD->getBody(), S)) {
-
- const char* name = LOpts.getGC() == LangOptions::NonGC
- ? "missing [super dealloc]"
- : "missing [super dealloc] (Hybrid MM, non-GC)";
-
- std::string buf;
- llvm::raw_string_ostream os(buf);
- os << "The 'dealloc' instance method in Objective-C class '" << *D
- << "' does not send a 'dealloc' message to its super class"
- " (missing [super dealloc])";
-
- BR.EmitBasicReport(MD, Checker, name, categories::CoreFoundationObjectiveC,
- os.str(), DLoc);
- return;
- }
-
// Get the "release" selector.
IdentifierInfo* RII = &Ctx.Idents.get("release");
Selector RS = Ctx.Selectors.getSelector(0, &RII);
diff --git a/lib/StaticAnalyzer/Checkers/Checkers.td b/lib/StaticAnalyzer/Checkers/Checkers.td
index 44eb64187bfb..ba5b4fa3c66c 100644
--- a/lib/StaticAnalyzer/Checkers/Checkers.td
+++ b/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -184,6 +184,10 @@ def NewDeleteChecker : Checker<"NewDelete">,
HelpText<"Check for double-free and use-after-free problems. Traces memory managed by new/delete.">,
DescFile<"MallocChecker.cpp">;
+def NewDeleteLeaksChecker : Checker<"NewDeleteLeaks">,
+ HelpText<"Check for memory leaks. Traces memory managed by new/delete.">,
+ DescFile<"MallocChecker.cpp">;
+
} // end: "cplusplus"
let ParentPackage = CplusplusAlpha in {
@@ -192,10 +196,6 @@ def VirtualCallChecker : Checker<"VirtualCall">,
HelpText<"Check virtual function calls during construction or destruction">,
DescFile<"VirtualCallChecker.cpp">;
-def NewDeleteLeaksChecker : Checker<"NewDeleteLeaks">,
- HelpText<"Check for memory leaks. Traces memory managed by new/delete.">,
- DescFile<"MallocChecker.cpp">;
-
} // end: "alpha.cplusplus"
//===----------------------------------------------------------------------===//
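[Illustrative note, not part of the patch] The Checkers.td hunks above move NewDeleteLeaks out of the alpha.cplusplus package and into cplusplus, next to the NewDelete checker. A sketch of the kind of code it reports:

    // The pointer is overwritten while it still owns the allocation, so the
    // 'new int' can never be deleted.
    void drop() {
      int *p = new int(42);
      p = nullptr;          // leak of the memory allocated by 'new'
    }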
diff --git a/lib/StaticAnalyzer/Checkers/ClangSACheckers.h b/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
index de2ebce52c02..05b4a61c5af1 100644
--- a/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
+++ b/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SA_LIB_CHECKERS_CLANGSACHECKERS_H
-#define LLVM_CLANG_SA_LIB_CHECKERS_CLANGSACHECKERS_H
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_CLANGSACHECKERS_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_CLANGSACHECKERS_H
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
diff --git a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
index d5c52b4c6a31..58d0783f3974 100644
--- a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
@@ -445,7 +445,12 @@ static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
case Stmt::IntegerLiteralClass: {
const IntegerLiteral *IntLit1 = cast<IntegerLiteral>(Stmt1);
const IntegerLiteral *IntLit2 = cast<IntegerLiteral>(Stmt2);
- return IntLit1->getValue() == IntLit2->getValue();
+
+ llvm::APInt I1 = IntLit1->getValue();
+ llvm::APInt I2 = IntLit2->getValue();
+ if (I1.getBitWidth() != I2.getBitWidth())
+ return false;
+ return I1 == I2;
}
case Stmt::FloatingLiteralClass: {
const FloatingLiteral *FloatLit1 = cast<FloatingLiteral>(Stmt1);
@@ -455,7 +460,7 @@ static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
case Stmt::StringLiteralClass: {
const StringLiteral *StringLit1 = cast<StringLiteral>(Stmt1);
const StringLiteral *StringLit2 = cast<StringLiteral>(Stmt2);
- return StringLit1->getString() == StringLit2->getString();
+ return StringLit1->getBytes() == StringLit2->getBytes();
}
case Stmt::MemberExprClass: {
const MemberExpr *MemberStmt1 = cast<MemberExpr>(Stmt1);
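[Illustrative note, not part of the patch] The IdenticalExprChecker hunks above guard the APInt comparison with a bit-width check (llvm::APInt::operator== asserts when the widths differ) and switch string comparison to getBytes(), which is also valid for wide and other non-char string literals. A sketch of the integer case:

    // '1' is a 32-bit integer literal and '1LL' a 64-bit one; with the width
    // check the checker treats them as non-identical instead of asserting.
    long long pick(bool cond) {
      return cond ? 1 : 1LL;
    }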
diff --git a/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h b/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
index e35557f24bbc..b7549fd17da9 100644
--- a/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
+++ b/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
@@ -10,8 +10,8 @@
// inter-checker communications.
//===----------------------------------------------------------------------===//
-#ifndef INTERCHECKERAPI_H_
-#define INTERCHECKERAPI_H_
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_INTERCHECKERAPI_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_INTERCHECKERAPI_H
namespace clang {
namespace ento {
diff --git a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index 0f227bba0000..13ea4d3ed090 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -270,7 +270,7 @@ void MacOSKeychainAPIChecker::
os << "Deallocator doesn't match the allocator: '"
<< FunctionsToTrack[PDeallocIdx].Name << "' should be used.";
BugReport *Report = new BugReport(*BT, os.str(), N);
- Report->addVisitor(new SecKeychainBugVisitor(AP.first));
+ Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(AP.first));
Report->addRange(ArgExpr->getSourceRange());
markInteresting(Report, AP);
C.emitReport(Report);
@@ -311,7 +311,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
<< FunctionsToTrack[DIdx].Name
<< "'.";
BugReport *Report = new BugReport(*BT, os.str(), N);
- Report->addVisitor(new SecKeychainBugVisitor(V));
+ Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(V));
Report->addRange(ArgExpr->getSourceRange());
Report->markInteresting(AS->Region);
C.emitReport(Report);
@@ -430,7 +430,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
initBugType();
BugReport *Report = new BugReport(*BT,
"Only call free if a valid (non-NULL) buffer was returned.", N);
- Report->addVisitor(new SecKeychainBugVisitor(ArgSM));
+ Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(ArgSM));
Report->addRange(ArgExpr->getSourceRange());
Report->markInteresting(AS->Region);
C.emitReport(Report);
@@ -540,7 +540,7 @@ BugReport *MacOSKeychainAPIChecker::
BugReport *Report = new BugReport(*BT, os.str(), N, LocUsedForUniqueing,
AllocNode->getLocationContext()->getDecl());
- Report->addVisitor(new SecKeychainBugVisitor(AP.first));
+ Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(AP.first));
markInteresting(Report, AP);
return Report;
}
diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index a03fa259005a..aee5a43048b9 100644
--- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -15,6 +15,7 @@
#include "ClangSACheckers.h"
#include "InterCheckerAPI.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/ParentMap.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -41,7 +42,8 @@ enum AllocationFamily {
AF_None,
AF_Malloc,
AF_CXXNew,
- AF_CXXNewArray
+ AF_CXXNewArray,
+ AF_IfNameIndex
};
class RefState {
@@ -160,7 +162,8 @@ public:
MallocChecker()
: II_malloc(nullptr), II_free(nullptr), II_realloc(nullptr),
II_calloc(nullptr), II_valloc(nullptr), II_reallocf(nullptr),
- II_strndup(nullptr), II_strdup(nullptr), II_kmalloc(nullptr) {}
+ II_strndup(nullptr), II_strdup(nullptr), II_kmalloc(nullptr),
+ II_if_nameindex(nullptr), II_if_freenameindex(nullptr) {}
/// In pessimistic mode, the checker assumes that it does not know which
/// functions might free the memory.
@@ -173,6 +176,12 @@ public:
CK_NumCheckKinds
};
+ enum class MemoryOperationKind {
+ MOK_Allocate,
+ MOK_Free,
+ MOK_Any
+ };
+
DefaultBool ChecksEnabled[CK_NumCheckKinds];
CheckName CheckNames[CK_NumCheckKinds];
@@ -211,7 +220,7 @@ private:
mutable std::unique_ptr<BugType> BT_OffsetFree[CK_NumCheckKinds];
mutable IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc,
*II_valloc, *II_reallocf, *II_strndup, *II_strdup,
- *II_kmalloc;
+ *II_kmalloc, *II_if_nameindex, *II_if_freenameindex;
mutable Optional<uint64_t> KernelZeroFlagVal;
void initIdentifierInfo(ASTContext &C) const;
@@ -237,8 +246,10 @@ private:
/// Check if this is one of the functions which can allocate/reallocate memory
/// pointed to by one of its arguments.
bool isMemFunction(const FunctionDecl *FD, ASTContext &C) const;
- bool isFreeFunction(const FunctionDecl *FD, ASTContext &C) const;
- bool isAllocationFunction(const FunctionDecl *FD, ASTContext &C) const;
+ bool isCMemFunction(const FunctionDecl *FD,
+ ASTContext &C,
+ AllocationFamily Family,
+ MemoryOperationKind MemKind) const;
bool isStandardNewDelete(const FunctionDecl *FD, ASTContext &C) const;
///@}
ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
@@ -419,9 +430,9 @@ private:
BugReporterContext &BRC,
BugReport &BR) override;
- PathDiagnosticPiece* getEndPath(BugReporterContext &BRC,
- const ExplodedNode *EndPathNode,
- BugReport &BR) override {
+ std::unique_ptr<PathDiagnosticPiece>
+ getEndPath(BugReporterContext &BRC, const ExplodedNode *EndPathNode,
+ BugReport &BR) override {
if (!IsLeak)
return nullptr;
@@ -429,7 +440,8 @@ private:
PathDiagnosticLocation::createEndOfPath(EndPathNode,
BRC.getSourceManager());
// Do not add the statement itself as a range in case of leak.
- return new PathDiagnosticEventPiece(L, BR.getDescription(), false);
+ return llvm::make_unique<PathDiagnosticEventPiece>(L, BR.getDescription(),
+ false);
}
private:
@@ -494,13 +506,15 @@ void MallocChecker::initIdentifierInfo(ASTContext &Ctx) const {
II_strdup = &Ctx.Idents.get("strdup");
II_strndup = &Ctx.Idents.get("strndup");
II_kmalloc = &Ctx.Idents.get("kmalloc");
+ II_if_nameindex = &Ctx.Idents.get("if_nameindex");
+ II_if_freenameindex = &Ctx.Idents.get("if_freenameindex");
}
bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const {
- if (isFreeFunction(FD, C))
+ if (isCMemFunction(FD, C, AF_Malloc, MemoryOperationKind::MOK_Any))
return true;
- if (isAllocationFunction(FD, C))
+ if (isCMemFunction(FD, C, AF_IfNameIndex, MemoryOperationKind::MOK_Any))
return true;
if (isStandardNewDelete(FD, C))
@@ -509,45 +523,61 @@ bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const {
return false;
}
-bool MallocChecker::isAllocationFunction(const FunctionDecl *FD,
- ASTContext &C) const {
+bool MallocChecker::isCMemFunction(const FunctionDecl *FD,
+ ASTContext &C,
+ AllocationFamily Family,
+ MemoryOperationKind MemKind) const {
if (!FD)
return false;
+ bool CheckFree = (MemKind == MemoryOperationKind::MOK_Any ||
+ MemKind == MemoryOperationKind::MOK_Free);
+ bool CheckAlloc = (MemKind == MemoryOperationKind::MOK_Any ||
+ MemKind == MemoryOperationKind::MOK_Allocate);
+
if (FD->getKind() == Decl::Function) {
- IdentifierInfo *FunI = FD->getIdentifier();
+ const IdentifierInfo *FunI = FD->getIdentifier();
initIdentifierInfo(C);
- if (FunI == II_malloc || FunI == II_realloc ||
- FunI == II_reallocf || FunI == II_calloc || FunI == II_valloc ||
- FunI == II_strdup || FunI == II_strndup || FunI == II_kmalloc)
- return true;
- }
+ if (Family == AF_Malloc && CheckFree) {
+ if (FunI == II_free || FunI == II_realloc || FunI == II_reallocf)
+ return true;
+ }
- if (ChecksEnabled[CK_MallocOptimistic] && FD->hasAttrs())
- for (const auto *I : FD->specific_attrs<OwnershipAttr>())
- if (I->getOwnKind() == OwnershipAttr::Returns)
+ if (Family == AF_Malloc && CheckAlloc) {
+ if (FunI == II_malloc || FunI == II_realloc || FunI == II_reallocf ||
+ FunI == II_calloc || FunI == II_valloc || FunI == II_strdup ||
+ FunI == II_strndup || FunI == II_kmalloc)
return true;
- return false;
-}
+ }
-bool MallocChecker::isFreeFunction(const FunctionDecl *FD, ASTContext &C) const {
- if (!FD)
- return false;
+ if (Family == AF_IfNameIndex && CheckFree) {
+ if (FunI == II_if_freenameindex)
+ return true;
+ }
- if (FD->getKind() == Decl::Function) {
- IdentifierInfo *FunI = FD->getIdentifier();
- initIdentifierInfo(C);
+ if (Family == AF_IfNameIndex && CheckAlloc) {
+ if (FunI == II_if_nameindex)
+ return true;
+ }
+ }
- if (FunI == II_free || FunI == II_realloc || FunI == II_reallocf)
- return true;
+ if (Family != AF_Malloc)
+ return false;
+
+ if (ChecksEnabled[CK_MallocOptimistic] && FD->hasAttrs()) {
+ for (const auto *I : FD->specific_attrs<OwnershipAttr>()) {
+ OwnershipAttr::OwnershipKind OwnKind = I->getOwnKind();
+ if(OwnKind == OwnershipAttr::Takes || OwnKind == OwnershipAttr::Holds) {
+ if (CheckFree)
+ return true;
+ } else if (OwnKind == OwnershipAttr::Returns) {
+ if (CheckAlloc)
+ return true;
+ }
+ }
}
- if (ChecksEnabled[CK_MallocOptimistic] && FD->hasAttrs())
- for (const auto *I : FD->specific_attrs<OwnershipAttr>())
- if (I->getOwnKind() == OwnershipAttr::Takes ||
- I->getOwnKind() == OwnershipAttr::Holds)
- return true;
return false;
}
@@ -730,6 +760,13 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
else
llvm_unreachable("not a new/delete operator");
+ } else if (FunI == II_if_nameindex) {
+ // Should we model this differently? We can allocate a fixed number of
+ // elements with zeros in the last one.
+ State = MallocMemAux(C, CE, UnknownVal(), UnknownVal(), State,
+ AF_IfNameIndex);
+ } else if (FunI == II_if_freenameindex) {
+ State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
}
}
@@ -753,6 +790,42 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
C.addTransition(State);
}
+static QualType getDeepPointeeType(QualType T) {
+ QualType Result = T, PointeeType = T->getPointeeType();
+ while (!PointeeType.isNull()) {
+ Result = PointeeType;
+ PointeeType = PointeeType->getPointeeType();
+ }
+ return Result;
+}
+
+static bool treatUnusedNewEscaped(const CXXNewExpr *NE) {
+
+ const CXXConstructExpr *ConstructE = NE->getConstructExpr();
+ if (!ConstructE)
+ return false;
+
+ if (!NE->getAllocatedType()->getAsCXXRecordDecl())
+ return false;
+
+ const CXXConstructorDecl *CtorD = ConstructE->getConstructor();
+
+ // Iterate over the constructor parameters.
+ for (const auto *CtorParam : CtorD->params()) {
+
+ QualType CtorParamPointeeT = CtorParam->getType()->getPointeeType();
+ if (CtorParamPointeeT.isNull())
+ continue;
+
+ CtorParamPointeeT = getDeepPointeeType(CtorParamPointeeT);
+
+ if (CtorParamPointeeT->getAsCXXRecordDecl())
+ return true;
+ }
+
+ return false;
+}
+
void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
CheckerContext &C) const {
@@ -765,6 +838,10 @@ void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
if (!isStandardNewDelete(NE->getOperatorNew(), C.getASTContext()))
return;
+ ParentMap &PM = C.getLocationContext()->getParentMap();
+ if (!PM.isConsumedExpr(NE) && treatUnusedNewEscaped(NE))
+ return;
+
ProgramStateRef State = C.getState();
// The return value from operator new is bound to a specified initialization
// value (if any) and we don't want to lose this value. So we call
@@ -859,6 +936,10 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
ProgramStateRef State,
AllocationFamily Family) {
+ // We expect the malloc functions to return a pointer.
+ if (!Loc::isLocType(CE->getType()))
+ return nullptr;
+
// Bind the return value to the symbolic value from the heap region.
// TODO: We could rewrite post visit to eval call; 'malloc' does not have
// side effects other than what we model here.
@@ -869,10 +950,6 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
.castAs<DefinedSVal>();
State = State->BindExpr(CE, C.getLocationContext(), RetVal);
- // We expect the malloc functions to return a pointer.
- if (!RetVal.getAs<Loc>())
- return nullptr;
-
// Fill the region with the initialization value.
State = State->bindDefault(RetVal, Init);
@@ -974,7 +1051,7 @@ AllocationFamily MallocChecker::getAllocationFamily(CheckerContext &C,
ASTContext &Ctx = C.getASTContext();
- if (isAllocationFunction(FD, Ctx) || isFreeFunction(FD, Ctx))
+ if (isCMemFunction(FD, Ctx, AF_Malloc, MemoryOperationKind::MOK_Any))
return AF_Malloc;
if (isStandardNewDelete(FD, Ctx)) {
@@ -985,6 +1062,9 @@ AllocationFamily MallocChecker::getAllocationFamily(CheckerContext &C,
return AF_CXXNewArray;
}
+ if (isCMemFunction(FD, Ctx, AF_IfNameIndex, MemoryOperationKind::MOK_Any))
+ return AF_IfNameIndex;
+
return AF_None;
}
@@ -1048,6 +1128,7 @@ void MallocChecker::printExpectedAllocName(raw_ostream &os, CheckerContext &C,
case AF_Malloc: os << "malloc()"; return;
case AF_CXXNew: os << "'new'"; return;
case AF_CXXNewArray: os << "'new[]'"; return;
+ case AF_IfNameIndex: os << "'if_nameindex()'"; return;
case AF_None: llvm_unreachable("not a deallocation expression");
}
}
@@ -1058,6 +1139,7 @@ void MallocChecker::printExpectedDeallocName(raw_ostream &os,
case AF_Malloc: os << "free()"; return;
case AF_CXXNew: os << "'delete'"; return;
case AF_CXXNewArray: os << "'delete[]'"; return;
+ case AF_IfNameIndex: os << "'if_freenameindex()'"; return;
case AF_None: llvm_unreachable("suspicious AF_None argument");
}
}
@@ -1201,7 +1283,8 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
Optional<MallocChecker::CheckKind>
MallocChecker::getCheckIfTracked(AllocationFamily Family) const {
switch (Family) {
- case AF_Malloc: {
+ case AF_Malloc:
+ case AF_IfNameIndex: {
if (ChecksEnabled[CK_MallocOptimistic]) {
return CK_MallocOptimistic;
} else if (ChecksEnabled[CK_MallocPessimistic]) {
@@ -1425,7 +1508,7 @@ void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
BugReport *R = new BugReport(*BT_MismatchedDealloc, os.str(), N);
R->markInteresting(Sym);
R->addRange(Range);
- R->addVisitor(new MallocBugVisitor(Sym));
+ R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
C.emitReport(R);
}
}
@@ -1509,7 +1592,7 @@ void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
R->markInteresting(Sym);
R->addRange(Range);
- R->addVisitor(new MallocBugVisitor(Sym));
+ R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
C.emitReport(R);
}
}
@@ -1541,7 +1624,7 @@ void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
R->markInteresting(Sym);
if (PrevSym)
R->markInteresting(PrevSym);
- R->addVisitor(new MallocBugVisitor(Sym));
+ R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
C.emitReport(R);
}
}
@@ -1565,7 +1648,7 @@ void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
"Attempt to delete released memory", N);
R->markInteresting(Sym);
- R->addVisitor(new MallocBugVisitor(Sym));
+ R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
C.emitReport(R);
}
}
@@ -1793,7 +1876,7 @@ void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
new BugReport(*BT_Leak[*CheckKind], os.str(), N, LocUsedForUniqueing,
AllocNode->getLocationContext()->getDecl());
R->markInteresting(Sym);
- R->addVisitor(new MallocBugVisitor(Sym, true));
+ R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym, true));
C.emitReport(R);
}
@@ -1865,13 +1948,16 @@ void MallocChecker::checkPreCall(const CallEvent &Call,
if (!FD)
return;
+ ASTContext &Ctx = C.getASTContext();
if ((ChecksEnabled[CK_MallocOptimistic] ||
ChecksEnabled[CK_MallocPessimistic]) &&
- isFreeFunction(FD, C.getASTContext()))
+ (isCMemFunction(FD, Ctx, AF_Malloc, MemoryOperationKind::MOK_Free) ||
+ isCMemFunction(FD, Ctx, AF_IfNameIndex,
+ MemoryOperationKind::MOK_Free)))
return;
if (ChecksEnabled[CK_NewDeleteChecker] &&
- isStandardNewDelete(FD, C.getASTContext()))
+ isStandardNewDelete(FD, Ctx))
return;
}
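[Illustrative note, not part of the patch] Among other things, the MallocChecker hunks above add a new AF_IfNameIndex allocation family so that memory returned by if_nameindex() is tracked and must be released with if_freenameindex(). A sketch using the POSIX <net/if.h> interface:

    #include <net/if.h>

    void list_interfaces() {
      struct if_nameindex *ifs = if_nameindex();   // tracked allocation
      if (!ifs)
        return;
      // ... iterate over ifs ...
      if_freenameindex(ifs);   // matching deallocator; omitting it is reported
                               // as a leak, and free(ifs) as a mismatch
    }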
diff --git a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index 4a50d9362874..296aec66805a 100644
--- a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -137,6 +137,10 @@ public:
// Determine if the pointee and sizeof types are compatible. Here
// we ignore constness of pointer types.
static bool typesCompatible(ASTContext &C, QualType A, QualType B) {
+ // sizeof(void*) is compatible with any other pointer.
+ if (B->isVoidPointerType() && A->getAs<PointerType>())
+ return true;
+
while (true) {
A = A.getCanonicalType();
B = B.getCanonicalType();
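[Illustrative note, not part of the patch] The MallocSizeofChecker hunk above treats sizeof(void *) as compatible with any pointer type, matching the common idiom of sizing pointer tables generically. For example:

    #include <stdlib.h>

    // No sizeof-mismatch warning anymore: 'void *' has the same size as the
    // other object pointer types on the supported targets.
    int **make_table(size_t n) {
      return (int **)malloc(n * sizeof(void *));
    }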
diff --git a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 61d2b87cb0c0..cb2d46b58310 100644
--- a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -49,14 +49,27 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
if (!FD)
return;
- const NonNullAttr *Att = FD->getAttr<NonNullAttr>();
+ // Merge all non-null attributes
+ unsigned NumArgs = Call.getNumArgs();
+ llvm::SmallBitVector AttrNonNull(NumArgs);
+ for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
+ if (!NonNull->args_size()) {
+ AttrNonNull.set(0, NumArgs);
+ break;
+ }
+ for (unsigned Val : NonNull->args()) {
+ if (Val >= NumArgs)
+ continue;
+ AttrNonNull.set(Val);
+ }
+ }
ProgramStateRef state = C.getState();
CallEvent::param_type_iterator TyI = Call.param_type_begin(),
TyE = Call.param_type_end();
- for (unsigned idx = 0, count = Call.getNumArgs(); idx != count; ++idx){
+ for (unsigned idx = 0; idx < NumArgs; ++idx) {
// Check if the parameter is a reference. We want to report when a reference
// to a null pointer is passed as a parameter.
@@ -66,7 +79,7 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
TyI++;
}
- bool haveAttrNonNull = Att && Att->isNonNull(idx);
+ bool haveAttrNonNull = AttrNonNull[idx];
if (!haveAttrNonNull) {
// Check if the parameter is also marked 'nonnull'.
ArrayRef<ParmVarDecl*> parms = Call.parameters();
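[Illustrative note, not part of the patch] The NonNullParamChecker hunk above merges every nonnull attribute on the callee into one bit vector instead of consulting only the first attribute it finds. A sketch (attribute indices are 1-based, as in the GNU attribute syntax):

    __attribute__((nonnull(1))) __attribute__((nonnull(2)))
    void join(char *dst, const char *src);

    void caller(char *buf) {
      // With both attributes merged, a null second argument is reported
      // regardless of which single attribute the old code happened to pick.
      join(buf, nullptr);
    }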
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
index eb699d694087..9a460ba50ddd 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -1359,6 +1359,7 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
// Check the method family, and apply any default annotations.
switch (MD ? MD->getMethodFamily() : S.getMethodFamily()) {
case OMF_None:
+ case OMF_initialize:
case OMF_performSelector:
// Assume all Objective-C methods follow Cocoa Memory Management rules.
// FIXME: Does the non-threaded performSelector family really belong here?
@@ -1579,19 +1580,19 @@ void RetainSummaryManager::InitializeMethodSummaries() {
// Create summaries QCRenderer/QCView -createSnapShotImageOfType:
addInstMethSummary("QCRenderer", AllocSumm,
- "createSnapshotImageOfType", NULL);
+ "createSnapshotImageOfType", nullptr);
addInstMethSummary("QCView", AllocSumm,
- "createSnapshotImageOfType", NULL);
+ "createSnapshotImageOfType", nullptr);
// Create summaries for CIContext, 'createCGImage' and
// 'createCGLayerWithSize'. These objects are CF objects, and are not
// automatically garbage collected.
addInstMethSummary("CIContext", CFAllocSumm,
- "createCGImage", "fromRect", NULL);
- addInstMethSummary("CIContext", CFAllocSumm,
- "createCGImage", "fromRect", "format", "colorSpace", NULL);
- addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize",
- "info", NULL);
+ "createCGImage", "fromRect", nullptr);
+ addInstMethSummary("CIContext", CFAllocSumm, "createCGImage", "fromRect",
+ "format", "colorSpace", nullptr);
+ addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize", "info",
+ nullptr);
}
//===----------------------------------------------------------------------===//
@@ -1716,9 +1717,9 @@ namespace {
BugReporterContext &BRC,
BugReport &BR) override;
- PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
- const ExplodedNode *N,
- BugReport &BR) override;
+ std::unique_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR) override;
};
class CFRefLeakReportVisitor : public CFRefReportVisitor {
@@ -1727,17 +1728,17 @@ namespace {
const SummaryLogTy &log)
: CFRefReportVisitor(sym, GCEnabled, log) {}
- PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
- const ExplodedNode *N,
- BugReport &BR) override;
+ std::unique_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR) override;
- BugReporterVisitor *clone() const override {
+ std::unique_ptr<BugReporterVisitor> clone() const override {
// The curiously-recurring template pattern only works for one level of
// subclassing. Rather than make a new template base for
// CFRefReportVisitor, we simply override clone() to do the right thing.
// This could be trouble someday if BugReporterVisitorImpl is ever
// used for something else besides a convenient implementation of clone().
- return new CFRefLeakReportVisitor(*this);
+ return llvm::make_unique<CFRefLeakReportVisitor>(*this);
}
};
@@ -1750,7 +1751,7 @@ namespace {
bool registerVisitor = true)
: BugReport(D, D.getDescription(), n) {
if (registerVisitor)
- addVisitor(new CFRefReportVisitor(sym, GCEnabled, Log));
+ addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, GCEnabled, Log));
addGCModeDescription(LOpts, GCEnabled);
}
@@ -1758,7 +1759,7 @@ namespace {
const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
StringRef endText)
: BugReport(D, D.getDescription(), endText, n) {
- addVisitor(new CFRefReportVisitor(sym, GCEnabled, Log));
+ addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, GCEnabled, Log));
addGCModeDescription(LOpts, GCEnabled);
}
@@ -2218,18 +2219,16 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
InterestingMethodContext);
}
-PathDiagnosticPiece*
+std::unique_ptr<PathDiagnosticPiece>
CFRefReportVisitor::getEndPath(BugReporterContext &BRC,
- const ExplodedNode *EndN,
- BugReport &BR) {
+ const ExplodedNode *EndN, BugReport &BR) {
BR.markInteresting(Sym);
return BugReporterVisitor::getDefaultEndPath(BRC, EndN, BR);
}
-PathDiagnosticPiece*
+std::unique_ptr<PathDiagnosticPiece>
CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
- const ExplodedNode *EndN,
- BugReport &BR) {
+ const ExplodedNode *EndN, BugReport &BR) {
// Tell the BugReporterContext to report cases when the tracked symbol is
// assigned to different variables, etc.
@@ -2309,7 +2308,7 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
os << " is not referenced later in this execution path and has a retain "
"count of +" << RV->getCount();
- return new PathDiagnosticEventPiece(L, os.str());
+ return llvm::make_unique<PathDiagnosticEventPiece>(L, os.str());
}
CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
@@ -2388,7 +2387,7 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
}
}
- addVisitor(new CFRefLeakReportVisitor(sym, GCEnabled, Log));
+ addVisitor(llvm::make_unique<CFRefLeakReportVisitor>(sym, GCEnabled, Log));
}
//===----------------------------------------------------------------------===//
diff --git a/lib/StaticAnalyzer/Checkers/SelectorExtras.h b/lib/StaticAnalyzer/Checkers/SelectorExtras.h
index 48d96eb42234..41f70d7d5b69 100644
--- a/lib/StaticAnalyzer/Checkers/SelectorExtras.h
+++ b/lib/StaticAnalyzer/Checkers/SelectorExtras.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SA_CHECKERS_SELECTOREXTRAS
-#define LLVM_CLANG_SA_CHECKERS_SELECTOREXTRAS
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SELECTOREXTRAS_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SELECTOREXTRAS_H
#include "clang/AST/ASTContext.h"
#include <cstdarg>
@@ -34,7 +34,7 @@ static inline Selector getKeywordSelector(ASTContext &Ctx, va_list argp) {
return getKeywordSelectorImpl(Ctx, First, argp);
}
-END_WITH_NULL
+LLVM_END_WITH_NULL
static inline Selector getKeywordSelector(ASTContext &Ctx,
const char *First, ...) {
va_list argp;
@@ -44,7 +44,7 @@ static inline Selector getKeywordSelector(ASTContext &Ctx,
return result;
}
-END_WITH_NULL
+LLVM_END_WITH_NULL
static inline void lazyInitKeywordSelector(Selector &Sel, ASTContext &Ctx,
const char *First, ...) {
if (!Sel.isNull())
diff --git a/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
index 3e9b57bdc507..ccf816c80c1e 100644
--- a/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -63,8 +63,7 @@ class SimpleStreamChecker : public Checker<check::PostCall,
const CallEvent &Call,
CheckerContext &C) const;
- void reportLeaks(SymbolVector LeakedStreams,
- CheckerContext &C,
+ void reportLeaks(ArrayRef<SymbolRef> LeakedStreams, CheckerContext &C,
ExplodedNode *ErrNode) const;
bool guaranteedNotToCloseFile(const CallEvent &Call) const;
@@ -222,16 +221,15 @@ void SimpleStreamChecker::reportDoubleClose(SymbolRef FileDescSym,
C.emitReport(R);
}
-void SimpleStreamChecker::reportLeaks(SymbolVector LeakedStreams,
- CheckerContext &C,
- ExplodedNode *ErrNode) const {
+void SimpleStreamChecker::reportLeaks(ArrayRef<SymbolRef> LeakedStreams,
+ CheckerContext &C,
+ ExplodedNode *ErrNode) const {
// Attach bug reports to the leak node.
// TODO: Identify the leaked file descriptor.
- for (SmallVectorImpl<SymbolRef>::iterator
- I = LeakedStreams.begin(), E = LeakedStreams.end(); I != E; ++I) {
+ for (SymbolRef LeakedStream : LeakedStreams) {
BugReport *R = new BugReport(*LeakBugType,
"Opened file is never closed; potential resource leak", ErrNode);
- R->markInteresting(*I);
+ R->markInteresting(LeakedStream);
C.emitReport(R);
}
}
diff --git a/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
index dad5c0d3bab8..083075db8fb9 100644
--- a/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
@@ -176,7 +176,8 @@ void TestAfterDivZeroChecker::reportBug(SVal Val, CheckerContext &C) const {
"already been used for division",
N);
- R->addVisitor(new DivisionBRVisitor(Val.getAsSymbol(), C.getStackFrame()));
+ R->addVisitor(llvm::make_unique<DivisionBRVisitor>(Val.getAsSymbol(),
+ C.getStackFrame()));
C.emitReport(R);
}
}
diff --git a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 93687db073f9..8976e0a699ec 100644
--- a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -92,8 +92,8 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
BugReport *R = new BugReport(*BT, os.str(), N);
if (const Expr *Ex = FindBlockDeclRefExpr(BE->getBody(), VD))
R->addRange(Ex->getSourceRange());
- R->addVisitor(new FindLastStoreBRVisitor(*V, VR,
- /*EnableNullFPSuppression*/false));
+ R->addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ *V, VR, /*EnableNullFPSuppression*/ false));
R->disablePathPruning();
// need location of block
C.emitReport(R);
diff --git a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index 4887d804c607..4bfed8504f98 100644
--- a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -62,6 +62,10 @@ private:
return;
BT.reset(new BugType(this, name, categories::UnixAPI));
}
+ void ReportOpenBug(CheckerContext &C,
+ ProgramStateRef State,
+ const char *Msg,
+ SourceRange SR) const;
};
} //end anonymous namespace
@@ -69,7 +73,44 @@ private:
// "open" (man 2 open)
//===----------------------------------------------------------------------===//
+void UnixAPIChecker::ReportOpenBug(CheckerContext &C,
+ ProgramStateRef State,
+ const char *Msg,
+ SourceRange SR) const {
+ ExplodedNode *N = C.generateSink(State);
+ if (!N)
+ return;
+
+ LazyInitialize(BT_open, "Improper use of 'open'");
+
+ BugReport *Report = new BugReport(*BT_open, Msg, N);
+ Report->addRange(SR);
+ C.emitReport(Report);
+}
+
void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+
+ if (CE->getNumArgs() < 2) {
+ // The frontend should issue a warning for this case, so this is a sanity
+ // check.
+ return;
+ } else if (CE->getNumArgs() == 3) {
+ const Expr *Arg = CE->getArg(2);
+ QualType QT = Arg->getType();
+ if (!QT->isIntegerType()) {
+ ReportOpenBug(C, state,
+ "Third argument to 'open' is not an integer",
+ Arg->getSourceRange());
+ return;
+ }
+ } else if (CE->getNumArgs() > 3) {
+ ReportOpenBug(C, state,
+ "Call to 'open' with more than three arguments",
+ CE->getArg(3)->getSourceRange());
+ return;
+ }
+
// The definition of O_CREAT is platform specific. We need a better way
// of querying this information from the checking environment.
if (!Val_O_CREAT.hasValue()) {
@@ -85,15 +126,6 @@ void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
}
}
- // Look at the 'oflags' argument for the O_CREAT flag.
- ProgramStateRef state = C.getState();
-
- if (CE->getNumArgs() < 2) {
- // The frontend should issue a warning for this case, so this is a sanity
- // check.
- return;
- }
-
// Now check if oflags has O_CREAT set.
const Expr *oflagsEx = CE->getArg(1);
const SVal V = state->getSVal(oflagsEx, C.getLocationContext());
@@ -122,18 +154,10 @@ void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
return;
if (CE->getNumArgs() < 3) {
- ExplodedNode *N = C.generateSink(trueState);
- if (!N)
- return;
-
- LazyInitialize(BT_open, "Improper use of 'open'");
-
- BugReport *report =
- new BugReport(*BT_open,
- "Call to 'open' requires a third argument when "
- "the 'O_CREAT' flag is set", N);
- report->addRange(oflagsEx->getSourceRange());
- C.emitReport(report);
+ ReportOpenBug(C, trueState,
+ "Call to 'open' requires a third argument when "
+ "the 'O_CREAT' flag is set",
+ oflagsEx->getSourceRange());
}
}
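[Illustrative note, not part of the patch] The UnixAPIChecker hunks above route the open(2) diagnostics through a shared ReportOpenBug helper and add two new reports: a non-integer third argument and a call with more than three arguments. A sketch of the flagged patterns:

    #include <fcntl.h>

    void bad_opens(const char *path) {
      open(path, O_CREAT);             // still reported: O_CREAT without a mode
      open(path, O_RDONLY, "0644");    // new: third argument is not an integer
      open(path, O_RDONLY, 0644, 0);   // new: more than three arguments
    }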
diff --git a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index 198a6285c908..cceffef82b36 100644
--- a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -30,7 +30,7 @@ using namespace ento;
namespace {
class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
mutable std::unique_ptr<BugType> BT;
- enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Tainted };
+ enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Tainted, VLA_Negative };
void reportBug(VLASize_Kind Kind,
const Expr *SizeE,
@@ -67,6 +67,9 @@ void VLASizeChecker::reportBug(VLASize_Kind Kind,
case VLA_Tainted:
os << "has tainted size";
break;
+ case VLA_Negative:
+ os << "has negative size";
+ break;
}
BugReport *report = new BugReport(*BT, os.str(), N);
@@ -128,8 +131,27 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
// declared. We do this by multiplying the array length by the element size,
// then matching that with the array region's extent symbol.
- // Convert the array length to size_t.
+ // Check if the size is negative.
SValBuilder &svalBuilder = C.getSValBuilder();
+
+ QualType Ty = SE->getType();
+ DefinedOrUnknownSVal Zero = svalBuilder.makeZeroVal(Ty);
+
+ SVal LessThanZeroVal = svalBuilder.evalBinOp(state, BO_LT, sizeD, Zero, Ty);
+ if (Optional<DefinedSVal> LessThanZeroDVal =
+ LessThanZeroVal.getAs<DefinedSVal>()) {
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef StatePos, StateNeg;
+
+ std::tie(StateNeg, StatePos) = CM.assumeDual(state, *LessThanZeroDVal);
+ if (StateNeg && !StatePos) {
+ reportBug(VLA_Negative, SE, state, C);
+ return;
+ }
+ state = StatePos;
+ }
+
+ // Convert the array length to size_t.
QualType SizeTy = Ctx.getSizeType();
NonLoc ArrayLength =
svalBuilder.evalCast(sizeD, SizeTy, SE->getType()).castAs<NonLoc>();
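[Illustrative note, not part of the patch] The VLASizeChecker hunks above add a VLA_Negative report: before converting the length to size_t, the checker asks the constraint manager whether the size is necessarily negative. A sketch (VLAs are a C feature that Clang also accepts in C++ as an extension):

    void make_buffer(int n) {
      if (n < 0) {
        int buf[n];    // reported: the declared VLA has a negative size
        (void)buf;
      }
    }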
diff --git a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index f8f5cf93ca89..7e1fc1eb54ad 100644
--- a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -146,15 +146,22 @@ void WalkAST::VisitCXXMemberCallExpr(CallExpr *CE) {
if (CME->getQualifier())
callIsNonVirtual = true;
- // Elide analyzing the call entirely if the base pointer is not 'this'.
- if (Expr *base = CME->getBase()->IgnoreImpCasts())
+ if (Expr *base = CME->getBase()->IgnoreImpCasts()) {
+ // Elide analyzing the call entirely if the base pointer is not 'this'.
if (!isa<CXXThisExpr>(base))
return;
+
+ // If the most derived class is marked final, we know that no subclass
+ // can override this member.
+ if (base->getBestDynamicClassType()->hasAttr<FinalAttr>())
+ callIsNonVirtual = true;
+ }
}
// Get the callee.
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CE->getDirectCallee());
- if (MD && MD->isVirtual() && !callIsNonVirtual)
+ if (MD && MD->isVirtual() && !callIsNonVirtual && !MD->hasAttr<FinalAttr>() &&
+ !MD->getParent()->hasAttr<FinalAttr>())
ReportVirtualCall(CE, MD->isPure());
Enqueue(CE);
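[Illustrative note, not part of the patch] The VirtualCallChecker hunks above suppress the during-construction/destruction warning when no override is possible, i.e. when the method, its class, or the most derived class of the 'this' object is marked final. For example:

    struct Widget final {
      Widget() { init(); }   // no longer reported: Widget is final
      virtual void init();
    };

    struct Base {
      Base() { setup(); }    // still reported: a subclass may override setup()
      virtual void setup();
    };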
diff --git a/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index 747b73c4164b..5798f01370de 100644
--- a/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -20,13 +20,16 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
StoreManagerCreator storemgr,
ConstraintManagerCreator constraintmgr,
CheckerManager *checkerMgr,
- AnalyzerOptions &Options)
+ AnalyzerOptions &Options,
+ CodeInjector *injector)
: AnaCtxMgr(Options.UnoptimizedCFG,
/*AddImplicitDtors=*/true,
/*AddInitializers=*/true,
Options.includeTemporaryDtorsInCFG(),
Options.shouldSynthesizeBodies(),
- Options.shouldConditionalizeStaticInitializers()),
+ Options.shouldConditionalizeStaticInitializers(),
+ /*addCXXNewAllocator=*/true,
+ injector),
Ctx(ctx),
Diags(diags),
LangOpts(lang),
diff --git a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index 7944c7eb0003..d717e3fe86d2 100644
--- a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -23,7 +23,8 @@ using namespace llvm;
AnalyzerOptions::UserModeKind AnalyzerOptions::getUserMode() {
if (UserMode == UMK_NotSet) {
- StringRef ModeStr(Config.GetOrCreateValue("mode", "deep").getValue());
+ StringRef ModeStr =
+ Config.insert(std::make_pair("mode", "deep")).first->second;
UserMode = llvm::StringSwitch<UserModeKind>(ModeStr)
.Case("shallow", UMK_Shallow)
.Case("deep", UMK_Deep)
@@ -48,7 +49,8 @@ IPAKind AnalyzerOptions::getIPAMode() {
assert(DefaultIPA);
// Lookup the ipa configuration option, use the default from User Mode.
- StringRef ModeStr(Config.GetOrCreateValue("ipa", DefaultIPA).getValue());
+ StringRef ModeStr =
+ Config.insert(std::make_pair("ipa", DefaultIPA)).first->second;
IPAKind IPAConfig = llvm::StringSwitch<IPAKind>(ModeStr)
.Case("none", IPAK_None)
.Case("basic-inlining", IPAK_BasicInlining)
@@ -72,9 +74,9 @@ AnalyzerOptions::mayInlineCXXMemberFunction(CXXInlineableMemberKind K) {
if (!CXXMemberInliningMode) {
static const char *ModeKey = "c++-inlining";
-
- StringRef ModeStr(Config.GetOrCreateValue(ModeKey,
- "destructors").getValue());
+
+ StringRef ModeStr =
+ Config.insert(std::make_pair(ModeKey, "destructors")).first->second;
CXXInlineableMemberKind &MutableMode =
const_cast<CXXInlineableMemberKind &>(CXXMemberInliningMode);
@@ -102,7 +104,8 @@ bool AnalyzerOptions::getBooleanOption(StringRef Name, bool DefaultVal) {
// FIXME: We should emit a warning here if the value is something other than
// "true", "false", or the empty string (meaning the default value),
// but the AnalyzerOptions doesn't have access to a diagnostic engine.
- StringRef V(Config.GetOrCreateValue(Name, toString(DefaultVal)).getValue());
+ StringRef V =
+ Config.insert(std::make_pair(Name, toString(DefaultVal))).first->second;
return llvm::StringSwitch<bool>(V)
.Case("true", true)
.Case("false", false)
@@ -200,8 +203,8 @@ int AnalyzerOptions::getOptionAsInteger(StringRef Name, int DefaultVal) {
SmallString<10> StrBuf;
llvm::raw_svector_ostream OS(StrBuf);
OS << DefaultVal;
-
- StringRef V(Config.GetOrCreateValue(Name, OS.str()).getValue());
+
+ StringRef V = Config.insert(std::make_pair(Name, OS.str())).first->second;
int Res = DefaultVal;
bool b = V.getAsInteger(10, Res);
assert(!b && "analyzer-config option should be numeric");
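[Illustrative note, not part of the patch] The AnalyzerOptions hunks above replace StringMap::GetOrCreateValue() with the insert() idiom: insert() only inserts when the key is absent and returns an iterator to the entry either way. A minimal sketch of the idiom, assuming a StringMap<std::string> like the analyzer's config table (getOrDefault is a made-up name):

    #include "llvm/ADT/StringMap.h"
    #include <string>
    #include <utility>

    llvm::StringRef getOrDefault(llvm::StringMap<std::string> &Config,
                                 llvm::StringRef Key, const char *Default) {
      // insert() is a no-op if Key is already present; .first always points at
      // the entry, so ->second is either the stored or the default value.
      return Config.insert(std::make_pair(Key, Default)).first->second;
    }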
diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp
index 141a48ba10b3..dff81e383ea6 100644
--- a/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -499,10 +499,9 @@ PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
//===----------------------------------------------------------------------===//
// "Visitors only" path diagnostic generation algorithm.
//===----------------------------------------------------------------------===//
-static bool GenerateVisitorsOnlyPathDiagnostic(PathDiagnostic &PD,
- PathDiagnosticBuilder &PDB,
- const ExplodedNode *N,
- ArrayRef<BugReporterVisitor *> visitors) {
+static bool GenerateVisitorsOnlyPathDiagnostic(
+ PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
+ ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
// All path generation skips the very first node (the error node).
// This is because there is special handling for the end-of-path note.
N = N->getFirstPred();
@@ -511,11 +510,9 @@ static bool GenerateVisitorsOnlyPathDiagnostic(PathDiagnostic &PD,
BugReport *R = PDB.getBugReport();
while (const ExplodedNode *Pred = N->getFirstPred()) {
- for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
- E = visitors.end();
- I != E; ++I) {
+ for (auto &V : visitors) {
// Visit all the node pairs, but throw the path pieces away.
- PathDiagnosticPiece *Piece = (*I)->VisitNode(N, Pred, PDB, *R);
+ PathDiagnosticPiece *Piece = V->VisitNode(N, Pred, PDB, *R);
delete Piece;
}
@@ -556,11 +553,10 @@ static void updateStackPiecesWithMessage(PathDiagnosticPiece *P,
static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM);
-static bool GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
- PathDiagnosticBuilder &PDB,
- const ExplodedNode *N,
- LocationContextMap &LCM,
- ArrayRef<BugReporterVisitor *> visitors) {
+static bool GenerateMinimalPathDiagnostic(
+ PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
+ LocationContextMap &LCM,
+ ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
SourceManager& SMgr = PDB.getSourceManager();
const LocationContext *LC = PDB.LC;
@@ -870,10 +866,8 @@ static bool GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (NextNode) {
// Add diagnostic pieces from custom visitors.
BugReport *R = PDB.getBugReport();
- for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
- E = visitors.end();
- I != E; ++I) {
- if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *R)) {
+ for (auto &V : visitors) {
+ if (PathDiagnosticPiece *p = V->VisitNode(N, NextNode, PDB, *R)) {
PD.getActivePath().push_front(p);
updateStackPiecesWithMessage(p, CallStack);
}
@@ -1392,11 +1386,10 @@ static bool isInLoopBody(ParentMap &PM, const Stmt *S, const Stmt *Term) {
// Top-level logic for generating extensive path diagnostics.
//===----------------------------------------------------------------------===//
-static bool GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
- PathDiagnosticBuilder &PDB,
- const ExplodedNode *N,
- LocationContextMap &LCM,
- ArrayRef<BugReporterVisitor *> visitors) {
+static bool GenerateExtensivePathDiagnostic(
+ PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
+ LocationContextMap &LCM,
+ ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
EdgeBuilder EB(PD, PDB);
const SourceManager& SM = PDB.getSourceManager();
StackDiagVector CallStack;
@@ -1573,10 +1566,8 @@ static bool GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
// Add pieces from custom visitors.
BugReport *R = PDB.getBugReport();
- for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
- E = visitors.end();
- I != E; ++I) {
- if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *R)) {
+ for (auto &V : visitors) {
+ if (PathDiagnosticPiece *p = V->VisitNode(N, NextNode, PDB, *R)) {
const PathDiagnosticLocation &Loc = p->getLocation();
EB.addEdge(Loc, true);
PD.getActivePath().push_front(p);
@@ -1635,12 +1626,10 @@ static const char StrLoopRangeEmpty[] =
static const char StrLoopCollectionEmpty[] =
"Loop body skipped when collection is empty";
-static bool
-GenerateAlternateExtensivePathDiagnostic(PathDiagnostic& PD,
- PathDiagnosticBuilder &PDB,
- const ExplodedNode *N,
- LocationContextMap &LCM,
- ArrayRef<BugReporterVisitor *> visitors) {
+static bool GenerateAlternateExtensivePathDiagnostic(
+ PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
+ LocationContextMap &LCM,
+ ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
BugReport *report = PDB.getBugReport();
const SourceManager& SM = PDB.getSourceManager();
@@ -1867,10 +1856,8 @@ GenerateAlternateExtensivePathDiagnostic(PathDiagnostic& PD,
continue;
// Add pieces from custom visitors.
- for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
- E = visitors.end();
- I != E; ++I) {
- if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *report)) {
+ for (auto &V : visitors) {
+ if (PathDiagnosticPiece *p = V->VisitNode(N, NextNode, PDB, *report)) {
addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), PDB.LC);
PD.getActivePath().push_front(p);
updateStackPiecesWithMessage(p, CallStack);
@@ -2547,7 +2534,7 @@ void BuiltinBug::anchor() {}
void BugReport::NodeResolver::anchor() {}
-void BugReport::addVisitor(BugReporterVisitor* visitor) {
+void BugReport::addVisitor(std::unique_ptr<BugReporterVisitor> visitor) {
if (!visitor)
return;
@@ -2555,20 +2542,15 @@ void BugReport::addVisitor(BugReporterVisitor* visitor) {
visitor->Profile(ID);
void *InsertPos;
- if (CallbacksSet.FindNodeOrInsertPos(ID, InsertPos)) {
- delete visitor;
+ if (CallbacksSet.FindNodeOrInsertPos(ID, InsertPos))
return;
- }
- CallbacksSet.InsertNode(visitor, InsertPos);
- Callbacks.push_back(visitor);
+ CallbacksSet.InsertNode(visitor.get(), InsertPos);
+ Callbacks.push_back(std::move(visitor));
++ConfigurationChangeToken;
}
BugReport::~BugReport() {
- for (visitor_iterator I = visitor_begin(), E = visitor_end(); I != E; ++I) {
- delete *I;
- }
while (!interestingSymbols.empty()) {
popInterestingSymbolsAndRegions();
}
@@ -2874,7 +2856,7 @@ TrimmedGraph::TrimmedGraph(const ExplodedGraph *OriginalGraph,
// The trimmed graph is created in the body of the constructor to ensure
// that the DenseMaps have been initialized already.
InterExplodedGraphMap ForwardMap;
- G.reset(OriginalGraph->trim(Nodes, &ForwardMap, &InverseMap));
+ G = OriginalGraph->trim(Nodes, &ForwardMap, &InverseMap);
// Find the (first) error node in the trimmed graph. We just need to consult
// the node map which maps from nodes in the original graph to nodes
@@ -2938,8 +2920,7 @@ bool TrimmedGraph::popNextReportGraph(ReportGraph &GraphWrapper) {
// Create a new graph with a single path. This is the graph
// that will be returned to the caller.
- ExplodedGraph *GNew = new ExplodedGraph();
- GraphWrapper.Graph.reset(GNew);
+ auto GNew = llvm::make_unique<ExplodedGraph>();
GraphWrapper.BackMap.clear();
// Now walk from the error node up the BFS path, always taking the
@@ -2976,6 +2957,8 @@ bool TrimmedGraph::popNextReportGraph(ReportGraph &GraphWrapper) {
PriorityCompare<false>(PriorityMap));
}
+ GraphWrapper.Graph = std::move(GNew);
+
return true;
}
@@ -3126,9 +3109,9 @@ bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD,
const ExplodedNode *N = ErrorGraph.ErrorNode;
// Register additional node visitors.
- R->addVisitor(new NilReceiverBRVisitor());
- R->addVisitor(new ConditionBRVisitor());
- R->addVisitor(new LikelyFalsePositiveSuppressionBRVisitor());
+ R->addVisitor(llvm::make_unique<NilReceiverBRVisitor>());
+ R->addVisitor(llvm::make_unique<ConditionBRVisitor>());
+ R->addVisitor(llvm::make_unique<LikelyFalsePositiveSuppressionBRVisitor>());
BugReport::VisitorList visitors;
unsigned origReportConfigToken, finalReportConfigToken;
@@ -3153,18 +3136,19 @@ bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD,
std::unique_ptr<PathDiagnosticPiece> LastPiece;
for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end();
I != E; ++I) {
- if (PathDiagnosticPiece *Piece = (*I)->getEndPath(PDB, N, *R)) {
+ if (std::unique_ptr<PathDiagnosticPiece> Piece =
+ (*I)->getEndPath(PDB, N, *R)) {
assert (!LastPiece &&
"There can only be one final piece in a diagnostic.");
- LastPiece.reset(Piece);
+ LastPiece = std::move(Piece);
}
}
if (ActiveScheme != PathDiagnosticConsumer::None) {
if (!LastPiece)
- LastPiece.reset(BugReporterVisitor::getDefaultEndPath(PDB, N, *R));
+ LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R);
assert(LastPiece);
- PD.setEndOfPath(LastPiece.release());
+ PD.setEndOfPath(std::move(LastPiece));
}
// Make sure we get a clean location context map so we don't
@@ -3187,7 +3171,7 @@ bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD,
}
// Clean up the visitors we used.
- llvm::DeleteContainerPointers(visitors);
+ visitors.clear();
// Did anything change while generating this path?
finalReportConfigToken = R->getConfigurationChangeToken();
@@ -3247,15 +3231,15 @@ void BugReporter::emitReport(BugReport* R) {
// To guarantee memory release.
std::unique_ptr<BugReport> UniqueR(R);
- // Defensive checking: throw the bug away if it comes from a BodyFarm-
- // generated body. We do this very early because report processing relies
- // on the report's location being valid.
- // FIXME: Valid bugs can occur in BodyFarm-generated bodies, so really we
- // need to just find a reasonable location like we do later on with the path
- // pieces.
if (const ExplodedNode *E = R->getErrorNode()) {
- const LocationContext *LCtx = E->getLocationContext();
- if (LCtx->getAnalysisDeclContext()->isBodyAutosynthesized())
+ const AnalysisDeclContext *DeclCtx =
+ E->getLocationContext()->getAnalysisDeclContext();
+ // The source of an autosynthesized body can be a handcrafted AST or a model
+ // file. Locations from handcrafted ASTs have no valid source locations and
+ // have to be discarded; locations from model files should be preserved for
+ // processing and reporting.
+ if (DeclCtx->isBodyAutosynthesized() &&
+ !DeclCtx->isBodyAutosynthesizedFromModelFile())
return;
}
@@ -3277,12 +3261,11 @@ void BugReporter::emitReport(BugReport* R) {
BugReportEquivClass* EQ = EQClasses.FindNodeOrInsertPos(ID, InsertPos);
if (!EQ) {
- EQ = new BugReportEquivClass(UniqueR.release());
+ EQ = new BugReportEquivClass(std::move(UniqueR));
EQClasses.InsertNode(EQ, InsertPos);
EQClassesVector.push_back(EQ);
- }
- else
- EQ->AddReport(UniqueR.release());
+ } else
+ EQ->AddReport(std::move(UniqueR));
}
@@ -3449,13 +3432,13 @@ void BugReporter::FlushReport(BugReport *exampleReport,
// of the issue.
if (D->path.empty()) {
PathDiagnosticLocation L = exampleReport->getLocation(getSourceManager());
- PathDiagnosticPiece *piece =
- new PathDiagnosticEventPiece(L, exampleReport->getDescription());
+ auto piece = llvm::make_unique<PathDiagnosticEventPiece>(
+ L, exampleReport->getDescription());
BugReport::ranges_iterator Beg, End;
std::tie(Beg, End) = exampleReport->getRanges();
for ( ; Beg != End; ++Beg)
piece->addRange(*Beg);
- D->setEndOfPath(piece);
+ D->setEndOfPath(std::move(piece));
}
// Get the meta data.
@@ -3465,7 +3448,7 @@ void BugReporter::FlushReport(BugReport *exampleReport,
D->addMeta(*i);
}
- PD.HandlePathDiagnostic(D.release());
+ PD.HandlePathDiagnostic(std::move(D));
}
void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
@@ -3497,13 +3480,9 @@ BugType *BugReporter::getBugTypeForName(CheckName CheckName, StringRef name,
SmallString<136> fullDesc;
llvm::raw_svector_ostream(fullDesc) << CheckName.getName() << ":" << name
<< ":" << category;
- llvm::StringMapEntry<BugType *> &
- entry = StrBugTypes.GetOrCreateValue(fullDesc);
- BugType *BT = entry.getValue();
- if (!BT) {
+ BugType *&BT = StrBugTypes[fullDesc];
+ if (!BT)
BT = new BugType(CheckName, name, category);
- entry.setValue(BT);
- }
return BT;
}
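getBugTypeForName() above leans on StringMap::operator[], which default-constructs the mapped value (here a null BugType pointer) on first access. A generic sketch of the same lazy-creation pattern, with hypothetical names:

    #include "llvm/ADT/StringMap.h"
    #include "llvm/ADT/StringRef.h"

    struct Widget {};  // stand-in for BugType

    static Widget *getOrCreate(llvm::StringMap<Widget *> &Cache,
                               llvm::StringRef Key) {
      // operator[] inserts a value-initialized (null) pointer on first lookup,
      // so one reference covers both the "found" and "create" paths.
      Widget *&W = Cache[Key];
      if (!W)
        W = new Widget();
      return W;
    }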
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 0503acec051a..2d56bd088497 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -100,17 +100,14 @@ const Stmt *bugreporter::GetRetValExpr(const ExplodedNode *N) {
// Definitions for bug reporter visitors.
//===----------------------------------------------------------------------===//
-PathDiagnosticPiece*
+std::unique_ptr<PathDiagnosticPiece>
BugReporterVisitor::getEndPath(BugReporterContext &BRC,
- const ExplodedNode *EndPathNode,
- BugReport &BR) {
+ const ExplodedNode *EndPathNode, BugReport &BR) {
return nullptr;
}
-PathDiagnosticPiece*
-BugReporterVisitor::getDefaultEndPath(BugReporterContext &BRC,
- const ExplodedNode *EndPathNode,
- BugReport &BR) {
+std::unique_ptr<PathDiagnosticPiece> BugReporterVisitor::getDefaultEndPath(
+ BugReporterContext &BRC, const ExplodedNode *EndPathNode, BugReport &BR) {
PathDiagnosticLocation L =
PathDiagnosticLocation::createEndOfPath(EndPathNode,BRC.getSourceManager());
@@ -119,13 +116,12 @@ BugReporterVisitor::getDefaultEndPath(BugReporterContext &BRC,
// Only add the statement itself as a range if we didn't specify any
// special ranges for this report.
- PathDiagnosticPiece *P = new PathDiagnosticEventPiece(L,
- BR.getDescription(),
- Beg == End);
+ auto P = llvm::make_unique<PathDiagnosticEventPiece>(L, BR.getDescription(),
+ Beg == End);
for (; Beg != End; ++Beg)
P->addRange(*Beg);
- return P;
+ return std::move(P);
}
@@ -222,7 +218,8 @@ public:
EnableNullFPSuppression = State->isNull(*RetLoc).isConstrainedTrue();
BR.markInteresting(CalleeContext);
- BR.addVisitor(new ReturnVisitor(CalleeContext, EnableNullFPSuppression));
+ BR.addVisitor(llvm::make_unique<ReturnVisitor>(CalleeContext,
+ EnableNullFPSuppression));
}
/// Returns true if any counter-suppression heuristics are enabled for
@@ -399,9 +396,9 @@ public:
llvm_unreachable("Invalid visit mode!");
}
- PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
- const ExplodedNode *N,
- BugReport &BR) override {
+ std::unique_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR) override {
if (EnableNullFPSuppression)
BR.markInvalid(ReturnVisitor::getTag(), StackFrame);
return nullptr;
@@ -569,8 +566,8 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
if (const VarRegion *OriginalR = BDR->getOriginalRegion(VR)) {
if (Optional<KnownSVal> KV =
State->getSVal(OriginalR).getAs<KnownSVal>())
- BR.addVisitor(new FindLastStoreBRVisitor(*KV, OriginalR,
- EnableNullFPSuppression));
+ BR.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ *KV, OriginalR, EnableNullFPSuppression));
}
}
}
@@ -979,8 +976,8 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
// got initialized.
if (const MemRegion *RR = getLocationRegionIfReference(Inner, N)) {
if (Optional<KnownSVal> KV = LVal.getAs<KnownSVal>())
- report.addVisitor(new FindLastStoreBRVisitor(*KV, RR,
- EnableNullFPSuppression));
+ report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ *KV, RR, EnableNullFPSuppression));
}
}
@@ -990,30 +987,26 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
report.markInteresting(R);
report.markInteresting(V);
- report.addVisitor(new UndefOrNullArgVisitor(R));
+ report.addVisitor(llvm::make_unique<UndefOrNullArgVisitor>(R));
// If the contents are symbolic, find out when they became null.
- if (V.getAsLocSymbol(/*IncludeBaseRegions*/ true)) {
- BugReporterVisitor *ConstraintTracker =
- new TrackConstraintBRVisitor(V.castAs<DefinedSVal>(), false);
- report.addVisitor(ConstraintTracker);
- }
+ if (V.getAsLocSymbol(/*IncludeBaseRegions*/ true))
+ report.addVisitor(llvm::make_unique<TrackConstraintBRVisitor>(
+ V.castAs<DefinedSVal>(), false));
// Add visitor, which will suppress inline defensive checks.
if (Optional<DefinedSVal> DV = V.getAs<DefinedSVal>()) {
- if (!DV->isZeroConstant() &&
- LVState->isNull(*DV).isConstrainedTrue() &&
- EnableNullFPSuppression) {
- BugReporterVisitor *IDCSuppressor =
- new SuppressInlineDefensiveChecksVisitor(*DV,
- LVNode);
- report.addVisitor(IDCSuppressor);
+ if (!DV->isZeroConstant() && LVState->isNull(*DV).isConstrainedTrue() &&
+ EnableNullFPSuppression) {
+ report.addVisitor(
+ llvm::make_unique<SuppressInlineDefensiveChecksVisitor>(*DV,
+ LVNode));
}
}
if (Optional<KnownSVal> KV = V.getAs<KnownSVal>())
- report.addVisitor(new FindLastStoreBRVisitor(*KV, R,
- EnableNullFPSuppression));
+ report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ *KV, R, EnableNullFPSuppression));
return true;
}
}
@@ -1044,12 +1037,12 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
RVal = state->getSVal(L->getRegion());
const MemRegion *RegionRVal = RVal.getAsRegion();
- report.addVisitor(new UndefOrNullArgVisitor(L->getRegion()));
+ report.addVisitor(llvm::make_unique<UndefOrNullArgVisitor>(L->getRegion()));
if (RegionRVal && isa<SymbolicRegion>(RegionRVal)) {
report.markInteresting(RegionRVal);
- report.addVisitor(new TrackConstraintBRVisitor(
- loc::MemRegionVal(RegionRVal), false));
+ report.addVisitor(llvm::make_unique<TrackConstraintBRVisitor>(
+ loc::MemRegionVal(RegionRVal), false));
}
}
@@ -1132,8 +1125,8 @@ void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR,
if (V.getAs<loc::ConcreteInt>() || V.getAs<nonloc::ConcreteInt>()) {
// Register a new visitor with the BugReport.
- BR.addVisitor(new FindLastStoreBRVisitor(V.castAs<KnownSVal>(), R,
- EnableNullFPSuppression));
+ BR.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ V.castAs<KnownSVal>(), R, EnableNullFPSuppression));
}
}
}
@@ -1517,8 +1510,7 @@ static bool isInStdNamespace(const Decl *D) {
return ND->isStdNamespace();
}
-
-PathDiagnosticPiece *
+std::unique_ptr<PathDiagnosticPiece>
LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
BugReport &BR) {
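The recurring theme in the two files above is ownership transfer: visitors and end-of-path pieces are now returned and stored as std::unique_ptr instead of raw pointers the caller had to delete. A plain-C++ sketch of the pattern, independent of the clang API:

    #include <memory>
    #include <utility>
    #include <vector>

    struct Piece { int Kind = 0; };  // stand-in for PathDiagnosticPiece

    // Producer: hands back ownership, or nullptr when there is nothing to add.
    static std::unique_ptr<Piece> makeEndPiece(bool Interesting) {
      if (!Interesting)
        return nullptr;
      return std::unique_ptr<Piece>(new Piece());  // llvm::make_unique in-tree
    }

    // Consumer: moves the piece into its container; no delete on any path.
    static void collect(std::vector<std::unique_ptr<Piece>> &Path,
                        bool Interesting) {
      if (auto P = makeEndPiece(Interesting))
        Path.push_back(std::move(P));
    }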
diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp
index 7e0670a06608..f67354466006 100644
--- a/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -607,7 +607,7 @@ RuntimeDefinition CXXDestructorCall::getRuntimeDefinition() const {
ArrayRef<ParmVarDecl*> ObjCMethodCall::parameters() const {
const ObjCMethodDecl *D = getDecl();
if (!D)
- return ArrayRef<ParmVarDecl*>();
+ return None;
return D->parameters();
}
diff --git a/lib/StaticAnalyzer/Core/CoreEngine.cpp b/lib/StaticAnalyzer/Core/CoreEngine.cpp
index 4623c358a9e2..7844ad4a9c04 100644
--- a/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -14,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
@@ -164,7 +165,7 @@ WorkList* WorkList::makeBFSBlockDFSContents() {
bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
ProgramStateRef InitState) {
- if (G->num_roots() == 0) { // Initialize the analysis by constructing
+ if (G.num_roots() == 0) { // Initialize the analysis by constructing
// the root if none exists.
const CFGBlock *Entry = &(L->getCFG()->getEntry());
@@ -273,8 +274,8 @@ bool CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
ProgramStateRef InitState,
ExplodedNodeSet &Dst) {
bool DidNotFinish = ExecuteWorkList(L, Steps, InitState);
- for (ExplodedGraph::eop_iterator I = G->eop_begin(),
- E = G->eop_end(); I != E; ++I) {
+ for (ExplodedGraph::eop_iterator I = G.eop_begin(), E = G.eop_end(); I != E;
+ ++I) {
Dst.Add(*I);
}
return DidNotFinish;
@@ -346,6 +347,11 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
default:
llvm_unreachable("Analysis for this terminator not implemented.");
+ case Stmt::CXXBindTemporaryExprClass:
+ HandleCleanupTemporaryBranch(
+ cast<CXXBindTemporaryExpr>(B->getTerminator().getStmt()), B, Pred);
+ return;
+
// Model static initializers.
case Stmt::DeclStmtClass:
HandleStaticInit(cast<DeclStmt>(Term), B, Pred);
@@ -461,6 +467,17 @@ void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
enqueue(Dst);
}
+void CoreEngine::HandleCleanupTemporaryBranch(const CXXBindTemporaryExpr *BTE,
+ const CFGBlock *B,
+ ExplodedNode *Pred) {
+ assert(B->succ_size() == 2);
+ NodeBuilderContext Ctx(*this, B, Pred);
+ ExplodedNodeSet Dst;
+ SubEng.processCleanupTemporaryBranch(BTE, Ctx, Pred, Dst, *(B->succ_begin()),
+ *(B->succ_begin() + 1));
+ // Enqueue the new frontier onto the worklist.
+ enqueue(Dst);
+}
void CoreEngine::HandleStaticInit(const DeclStmt *DS, const CFGBlock *B,
ExplodedNode *Pred) {
@@ -494,13 +511,13 @@ void CoreEngine::generateNode(const ProgramPoint &Loc,
ExplodedNode *Pred) {
bool IsNew;
- ExplodedNode *Node = G->getNode(Loc, State, false, &IsNew);
+ ExplodedNode *Node = G.getNode(Loc, State, false, &IsNew);
if (Pred)
- Node->addPredecessor(Pred, *G); // Link 'Node' with its predecessor.
+ Node->addPredecessor(Pred, G); // Link 'Node' with its predecessor.
else {
assert (IsNew);
- G->addRoot(Node); // 'Node' has no predecessor. Make it a root.
+ G.addRoot(Node); // 'Node' has no predecessor. Make it a root.
}
// Only add 'Node' to the worklist if it was freshly generated.
@@ -549,8 +566,8 @@ void CoreEngine::enqueueStmtNode(ExplodedNode *N,
}
bool IsNew;
- ExplodedNode *Succ = G->getNode(Loc, N->getState(), false, &IsNew);
- Succ->addPredecessor(N, *G);
+ ExplodedNode *Succ = G.getNode(Loc, N->getState(), false, &IsNew);
+ Succ->addPredecessor(N, G);
if (IsNew)
WList->enqueue(Succ, Block, Idx+1);
@@ -565,8 +582,8 @@ ExplodedNode *CoreEngine::generateCallExitBeginNode(ExplodedNode *N) {
CallExitBegin Loc(LocCtx);
bool isNew;
- ExplodedNode *Node = G->getNode(Loc, N->getState(), false, &isNew);
- Node->addPredecessor(N, *G);
+ ExplodedNode *Node = G.getNode(Loc, N->getState(), false, &isNew);
+ Node->addPredecessor(N, G);
return isNew ? Node : nullptr;
}
@@ -596,7 +613,7 @@ void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set) {
WList->enqueue(N);
} else {
// TODO: We should run remove dead bindings here.
- G->addEndOfPath(N);
+ G.addEndOfPath(N);
NumPathsExplored++;
}
}
@@ -611,8 +628,8 @@ ExplodedNode* NodeBuilder::generateNodeImpl(const ProgramPoint &Loc,
bool MarkAsSink) {
HasGeneratedNodes = true;
bool IsNew;
- ExplodedNode *N = C.Eng.G->getNode(Loc, State, MarkAsSink, &IsNew);
- N->addPredecessor(FromN, *C.Eng.G);
+ ExplodedNode *N = C.Eng.G.getNode(Loc, State, MarkAsSink, &IsNew);
+ N->addPredecessor(FromN, C.Eng.G);
Frontier.erase(FromN);
if (!IsNew)
@@ -653,10 +670,10 @@ IndirectGotoNodeBuilder::generateNode(const iterator &I,
ProgramStateRef St,
bool IsSink) {
bool IsNew;
- ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
- Pred->getLocationContext()), St,
- IsSink, &IsNew);
- Succ->addPredecessor(Pred, *Eng.G);
+ ExplodedNode *Succ =
+ Eng.G.getNode(BlockEdge(Src, I.getBlock(), Pred->getLocationContext()),
+ St, IsSink, &IsNew);
+ Succ->addPredecessor(Pred, Eng.G);
if (!IsNew)
return nullptr;
@@ -673,10 +690,10 @@ SwitchNodeBuilder::generateCaseStmtNode(const iterator &I,
ProgramStateRef St) {
bool IsNew;
- ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
- Pred->getLocationContext()), St,
- false, &IsNew);
- Succ->addPredecessor(Pred, *Eng.G);
+ ExplodedNode *Succ =
+ Eng.G.getNode(BlockEdge(Src, I.getBlock(), Pred->getLocationContext()),
+ St, false, &IsNew);
+ Succ->addPredecessor(Pred, Eng.G);
if (!IsNew)
return nullptr;
@@ -698,10 +715,10 @@ SwitchNodeBuilder::generateDefaultCaseNode(ProgramStateRef St,
return nullptr;
bool IsNew;
- ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, DefaultBlock,
- Pred->getLocationContext()), St,
- IsSink, &IsNew);
- Succ->addPredecessor(Pred, *Eng.G);
+ ExplodedNode *Succ =
+ Eng.G.getNode(BlockEdge(Src, DefaultBlock, Pred->getLocationContext()),
+ St, IsSink, &IsNew);
+ Succ->addPredecessor(Pred, Eng.G);
if (!IsNew)
return nullptr;
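The new CXXBindTemporaryExprClass terminator case above only arises when the CFG is built with temporary destructors; the terminator decides whether a conditionally constructed temporary needs its destructor run. A small, hypothetical input that produces such a branch:

    // Hypothetical analyzed code, not part of this patch.
    struct Guard {
      Guard();
      ~Guard();        // non-trivial destructor forces a CXXBindTemporaryExpr
      bool armed() const;
    };

    bool check(bool Flag) {
      // Guard() is only constructed when Flag is true, so with temporary
      // destructors enabled the CFG ends a block with a CXXBindTemporaryExpr
      // terminator that decides whether ~Guard() runs on this path.
      return Flag && Guard().armed();
    }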
diff --git a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index 1c9a282b8298..010d26e48e1d 100644
--- a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -336,10 +336,10 @@ ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
return V;
}
-ExplodedGraph *
+std::unique_ptr<ExplodedGraph>
ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
InterExplodedGraphMap *ForwardMap,
- InterExplodedGraphMap *InverseMap) const{
+ InterExplodedGraphMap *InverseMap) const {
if (Nodes.empty())
return nullptr;
@@ -365,12 +365,9 @@ ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
const ExplodedNode *N = WL1.pop_back_val();
// Have we already visited this node? If so, continue to the next one.
- if (Pass1.count(N))
+ if (!Pass1.insert(N).second)
continue;
- // Otherwise, mark this node as visited.
- Pass1.insert(N);
-
// If this is a root enqueue it to the second worklist.
if (N->Preds.empty()) {
WL2.push_back(N);
@@ -378,9 +375,7 @@ ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
}
// Visit our predecessors and enqueue them.
- for (ExplodedNode::pred_iterator I = N->Preds.begin(), E = N->Preds.end();
- I != E; ++I)
- WL1.push_back(*I);
+ WL1.append(N->Preds.begin(), N->Preds.end());
}
// We didn't hit a root? Return with a null pointer for the new graph.
@@ -388,7 +383,7 @@ ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
return nullptr;
// Create an empty graph.
- ExplodedGraph* G = MakeEmptyGraph();
+ std::unique_ptr<ExplodedGraph> G = MakeEmptyGraph();
// ===- Pass 2 (forward DFS to construct the new graph) -===
while (!WL2.empty()) {
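The Pass1 change above (and the RegionStore::AddToWorkList change later in this diff) collapse a count()-then-insert() pair into a single insert(), using the returned bool to detect revisits. A generic worklist sketch of the idiom:

    #include <set>
    #include <vector>

    template <typename Node>
    static void visitOnce(std::vector<const Node *> &Worklist) {
      std::set<const Node *> Visited;
      while (!Worklist.empty()) {
        const Node *N = Worklist.back();
        Worklist.pop_back();
        if (!Visited.insert(N).second)  // false: N was already visited
          continue;
        // ... process N and push its neighbours onto Worklist ...
      }
    }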
diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 4e4095c5e0d8..4699df8819bd 100644
--- a/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -51,6 +51,15 @@ STATISTIC(NumMaxBlockCountReachedInInlined,
STATISTIC(NumTimesRetriedWithoutInlining,
"The # of times we re-evaluated a call without inlining");
+typedef std::pair<const CXXBindTemporaryExpr *, const StackFrameContext *>
+ CXXBindTemporaryContext;
+
+// Keeps track of whether CXXBindTemporaryExpr nodes have been evaluated.
+// The StackFrameContext ensures that nested calls due to inlined recursive
+// functions do not interfere.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(InitializedTemporariesSet,
+ llvm::ImmutableSet<CXXBindTemporaryContext>)
+
//===----------------------------------------------------------------------===//
// Engine construction and deletion.
//===----------------------------------------------------------------------===//
@@ -659,13 +668,71 @@ void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ ExplodedNodeSet CleanDtorState;
+ StmtNodeBuilder StmtBldr(Pred, CleanDtorState, *currBldrCtx);
+ ProgramStateRef State = Pred->getState();
+ if (State->contains<InitializedTemporariesSet>(
+ std::make_pair(D.getBindTemporaryExpr(), Pred->getStackFrame()))) {
+ // FIXME: Currently we insert temporary destructors for default parameters,
+ // but we don't insert the constructors.
+ State = State->remove<InitializedTemporariesSet>(
+ std::make_pair(D.getBindTemporaryExpr(), Pred->getStackFrame()));
+ }
+ StmtBldr.generateNode(D.getBindTemporaryExpr(), Pred, State);
QualType varType = D.getBindTemporaryExpr()->getSubExpr()->getType();
-
- // FIXME: Inlining of temporary destructors is not supported yet anyway, so we
- // just put a NULL region for now. This will need to be changed later.
+ // FIXME: Currently CleanDtorState can be empty here due to temporaries being
+ // bound to default parameters.
+ assert(CleanDtorState.size() <= 1);
+ ExplodedNode *CleanPred =
+ CleanDtorState.empty() ? Pred : *CleanDtorState.begin();
+ // FIXME: Inlining of temporary destructors is not supported yet anyway, so
+ // we just put a NULL region for now. This will need to be changed later.
VisitCXXDestructor(varType, nullptr, D.getBindTemporaryExpr(),
- /*IsBase=*/ false, Pred, Dst);
+ /*IsBase=*/false, CleanPred, Dst);
+}
+
+void ExprEngine::processCleanupTemporaryBranch(const CXXBindTemporaryExpr *BTE,
+ NodeBuilderContext &BldCtx,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst,
+ const CFGBlock *DstT,
+ const CFGBlock *DstF) {
+ BranchNodeBuilder TempDtorBuilder(Pred, Dst, BldCtx, DstT, DstF);
+ if (Pred->getState()->contains<InitializedTemporariesSet>(
+ std::make_pair(BTE, Pred->getStackFrame()))) {
+ TempDtorBuilder.markInfeasible(false);
+ TempDtorBuilder.generateNode(Pred->getState(), true, Pred);
+ } else {
+ TempDtorBuilder.markInfeasible(true);
+ TempDtorBuilder.generateNode(Pred->getState(), false, Pred);
+ }
+}
+
+void ExprEngine::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *BTE,
+ ExplodedNodeSet &PreVisit,
+ ExplodedNodeSet &Dst) {
+ if (!getAnalysisManager().options.includeTemporaryDtorsInCFG()) {
+ // In case we don't have temporary destructors in the CFG, do not mark
+ // the initialization - we would otherwise never clean it up.
+ Dst = PreVisit;
+ return;
+ }
+ StmtNodeBuilder StmtBldr(PreVisit, Dst, *currBldrCtx);
+ for (ExplodedNode *Node : PreVisit) {
+ ProgramStateRef State = Node->getState();
+
+ if (!State->contains<InitializedTemporariesSet>(
+ std::make_pair(BTE, Node->getStackFrame()))) {
+ // FIXME: Currently the state might already contain the marker due to
+ // incorrect handling of temporaries bound to default parameters; for
+ // those, we currently skip the CXXBindTemporaryExpr but rely on adding
+ // temporary destructor nodes.
+ State = State->add<InitializedTemporariesSet>(
+ std::make_pair(BTE, Node->getStackFrame()));
+ }
+ StmtBldr.generateNode(BTE, Node, State);
+ }
}
void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
@@ -685,6 +752,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXTryStmtClass:
case Stmt::CXXTypeidExprClass:
case Stmt::CXXUuidofExprClass:
+ case Stmt::CXXFoldExprClass:
case Stmt::MSPropertyRefExprClass:
case Stmt::CXXUnresolvedConstructExprClass:
case Stmt::DependentScopeDeclRefExprClass:
@@ -693,6 +761,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::ExpressionTraitExprClass:
case Stmt::UnresolvedLookupExprClass:
case Stmt::UnresolvedMemberExprClass:
+ case Stmt::TypoExprClass:
case Stmt::CXXNoexceptExprClass:
case Stmt::PackExpansionExprClass:
case Stmt::SubstNonTypeTemplateParmPackExprClass:
@@ -734,18 +803,24 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPParallelDirectiveClass:
case Stmt::OMPSimdDirectiveClass:
case Stmt::OMPForDirectiveClass:
+ case Stmt::OMPForSimdDirectiveClass:
case Stmt::OMPSectionsDirectiveClass:
case Stmt::OMPSectionDirectiveClass:
case Stmt::OMPSingleDirectiveClass:
case Stmt::OMPMasterDirectiveClass:
case Stmt::OMPCriticalDirectiveClass:
case Stmt::OMPParallelForDirectiveClass:
+ case Stmt::OMPParallelForSimdDirectiveClass:
case Stmt::OMPParallelSectionsDirectiveClass:
case Stmt::OMPTaskDirectiveClass:
case Stmt::OMPTaskyieldDirectiveClass:
case Stmt::OMPBarrierDirectiveClass:
case Stmt::OMPTaskwaitDirectiveClass:
case Stmt::OMPFlushDirectiveClass:
+ case Stmt::OMPOrderedDirectiveClass:
+ case Stmt::OMPAtomicDirectiveClass:
+ case Stmt::OMPTargetDirectiveClass:
+ case Stmt::OMPTeamsDirectiveClass:
llvm_unreachable("Stmt should not be in analyzer evaluation loop");
case Stmt::ObjCSubscriptRefExprClass:
@@ -771,6 +846,17 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
// Handled due to fully linearised CFG.
break;
+ case Stmt::CXXBindTemporaryExprClass: {
+ Bldr.takeNodes(Pred);
+ ExplodedNodeSet PreVisit;
+ getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
+ ExplodedNodeSet Next;
+ VisitCXXBindTemporaryExpr(cast<CXXBindTemporaryExpr>(S), PreVisit, Next);
+ getCheckerManager().runCheckersForPostStmt(Dst, Next, S, *this);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
// Cases not handled yet; but will handle some day.
case Stmt::DesignatedInitExprClass:
case Stmt::ExtVectorElementExprClass:
@@ -784,7 +870,6 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::ObjCProtocolExprClass:
case Stmt::ObjCSelectorExprClass:
case Stmt::ParenListExprClass:
- case Stmt::PredefinedExprClass:
case Stmt::ShuffleVectorExprClass:
case Stmt::ConvertVectorExprClass:
case Stmt::VAArgExprClass:
@@ -796,6 +881,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
// Cases we intentionally don't evaluate, since they don't need
// to be explicitly evaluated.
+ case Stmt::PredefinedExprClass:
case Stmt::AddrLabelExprClass:
case Stmt::AttributedStmtClass:
case Stmt::IntegerLiteralClass:
@@ -808,7 +894,6 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::SizeOfPackExprClass:
case Stmt::StringLiteralClass:
case Stmt::ObjCStringLiteralClass:
- case Stmt::CXXBindTemporaryExprClass:
case Stmt::CXXPseudoDestructorExprClass:
case Stmt::SubstNonTypeTemplateParmExprClass:
case Stmt::CXXNullPtrLiteralExprClass: {
@@ -1403,11 +1488,8 @@ static const Stmt *ResolveCondition(const Stmt *Condition,
if (!BO || !BO->isLogicalOp())
return Condition;
- // FIXME: This is a workaround until we handle temporary destructor branches
- // correctly; currently, temporary destructor branches lead to blocks that
- // only have a terminator (and no statements). These blocks violate the
- // invariant this function assumes.
- if (B->getTerminator().isTemporaryDtorsBranch()) return Condition;
+ assert(!B->getTerminator().isTemporaryDtorsBranch() &&
+ "Temporary destructor branches handled by processBindTemporary.");
// For logical operations, we still have the case where some branches
// use the traditional "merge" approach and others sink the branch
@@ -1436,6 +1518,8 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
ExplodedNodeSet &Dst,
const CFGBlock *DstT,
const CFGBlock *DstF) {
+ assert((!Condition || !isa<CXXBindTemporaryExpr>(Condition)) &&
+ "CXXBindTemporaryExprs are handled by processBindTemporary.");
const LocationContext *LCtx = Pred->getLocationContext();
PrettyStackTraceLocationContext StackCrashInfo(LCtx);
currBldrCtx = &BldCtx;
@@ -1599,10 +1683,29 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
builder.generateNode(I, state);
}
+#if 0
+static bool stackFrameDoesNotContainInitializedTemporaries(ExplodedNode &Pred) {
+ const StackFrameContext* Frame = Pred.getStackFrame();
+ const llvm::ImmutableSet<CXXBindTemporaryContext> &Set =
+ Pred.getState()->get<InitializedTemporariesSet>();
+ return std::find_if(Set.begin(), Set.end(),
+ [&](const CXXBindTemporaryContext &Ctx) {
+ if (Ctx.second == Frame) {
+ Ctx.first->dump();
+ llvm::errs() << "\n";
+ }
+ return Ctx.second == Frame;
+ }) == Set.end();
+}
+#endif
+
/// ProcessEndPath - Called by CoreEngine. Used to generate end-of-path
/// nodes when the control reaches the end of a function.
void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
ExplodedNode *Pred) {
+ // FIXME: Assert that stackFrameDoesNotContainInitializedTemporaries(*Pred).
+ // We currently cannot enable this assert, as lifetime-extended temporaries
+ // are not modelled correctly.
PrettyStackTraceLocationContext CrashInfo(Pred->getLocationContext());
StateMgr.EndPath(Pred->getState());
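InitializedTemporariesSet above follows the standard program-state trait pattern: register an immutable container, then query and rebuild the state through contains/add/remove. A hedged, checker-style sketch of the same pattern for a simple symbol set; the trait and helper names are hypothetical:

    #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
    using namespace clang;
    using namespace ento;

    // Hypothetical trait: symbols an (imaginary) checker is still tracking.
    REGISTER_SET_WITH_PROGRAMSTATE(PendingSymbols, SymbolRef)

    static void markPending(CheckerContext &C, SymbolRef Sym) {
      ProgramStateRef State = C.getState();
      if (State->contains<PendingSymbols>(Sym))
        return;                                  // already recorded
      State = State->add<PendingSymbols>(Sym);   // states are immutable
      C.addTransition(State);                    // commit the new state
    }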
diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index b1e9f06cae00..88b5464d44be 100644
--- a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -20,8 +20,8 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Rewrite/Core/HTMLRewrite.h"
#include "clang/Rewrite/Core/Rewriter.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
diff --git a/lib/StaticAnalyzer/Core/MemRegion.cpp b/lib/StaticAnalyzer/Core/MemRegion.cpp
index 22711f54239a..76cead623298 100644
--- a/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -1116,17 +1116,6 @@ const SymbolicRegion *MemRegion::getSymbolicBase() const {
return nullptr;
}
-// FIXME: Merge with the implementation of the same method in Store.cpp
-static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *D = RT->getDecl();
- if (!D->getDefinition())
- return false;
- }
-
- return true;
-}
-
RegionRawOffset ElementRegion::getAsArrayOffset() const {
CharUnits offset = CharUnits::Zero();
const ElementRegion *ER = this;
@@ -1148,7 +1137,7 @@ RegionRawOffset ElementRegion::getAsArrayOffset() const {
QualType elemType = ER->getElementType();
// If we are pointing to an incomplete type, go no further.
- if (!IsCompleteType(C, elemType)) {
+ if (elemType->isIncompleteType()) {
superR = ER;
break;
}
@@ -1288,7 +1277,7 @@ RegionOffset MemRegion::getAsOffset() const {
R = ER->getSuperRegion();
QualType EleTy = ER->getValueType();
- if (!IsCompleteType(getContext(), EleTy)) {
+ if (EleTy->isIncompleteType()) {
// We cannot compute the offset of the base class.
SymbolicOffsetBase = R;
continue;
diff --git a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index fd25bd8e3af3..b971fff642d4 100644
--- a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
+++ b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -197,9 +197,8 @@ PathDiagnosticConsumer::~PathDiagnosticConsumer() {
}
}
-void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
- std::unique_ptr<PathDiagnostic> OwningD(D);
-
+void PathDiagnosticConsumer::HandlePathDiagnostic(
+ std::unique_ptr<PathDiagnostic> D) {
if (!D || D->path.empty())
return;
@@ -213,7 +212,7 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
if (!supportsCrossFileDiagnostics()) {
// Verify that the entire path is from the same FileID.
FileID FID;
- const SourceManager &SMgr = (*D->path.begin())->getLocation().getManager();
+ const SourceManager &SMgr = D->path.front()->getLocation().getManager();
SmallVector<const PathPieces *, 5> WorkList;
WorkList.push_back(&D->path);
@@ -272,12 +271,12 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
if (orig_size <= new_size)
return;
- assert(orig != D);
+ assert(orig != D.get());
Diags.RemoveNode(orig);
delete orig;
}
- Diags.InsertNode(OwningD.release());
+ Diags.InsertNode(D.release());
}
static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y);
diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index ba3ad2ef16c5..a2c66f881482 100644
--- a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -42,7 +42,7 @@ namespace {
void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
FilesMade *filesMade) override;
- virtual StringRef getName() const override {
+ StringRef getName() const override {
return "PlistDiagnostics";
}
@@ -338,10 +338,10 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
}
// Open the file.
- std::string ErrMsg;
- llvm::raw_fd_ostream o(OutputFile.c_str(), ErrMsg, llvm::sys::fs::F_Text);
- if (!ErrMsg.empty()) {
- llvm::errs() << "warning: could not create file: " << OutputFile << '\n';
+ std::error_code EC;
+ llvm::raw_fd_ostream o(OutputFile, EC, llvm::sys::fs::F_Text);
+ if (EC) {
+ llvm::errs() << "warning: could not create file: " << EC.message() << '\n';
return;
}
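The plist writer now uses the std::error_code-based raw_fd_ostream constructor. A minimal sketch of the same error handling, with a hypothetical helper name:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/FileSystem.h"
    #include "llvm/Support/raw_ostream.h"
    #include <system_error>

    static bool writeTextFile(llvm::StringRef Path, llvm::StringRef Body) {
      std::error_code EC;
      llvm::raw_fd_ostream OS(Path, EC, llvm::sys::fs::F_Text);
      if (EC) {
        llvm::errs() << "warning: could not create file: " << EC.message()
                     << '\n';
        return false;
      }
      OS << Body;
      return true;
    }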
diff --git a/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h b/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h
index c2af36f40704..e7cc23ca8234 100644
--- a/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h
+++ b/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_STATICANALYZER_PRETTYSTACKTRACELOCATIONCONTEXT_H
-#define LLVM_CLANG_STATICANALYZER_PRETTYSTACKTRACELOCATIONCONTEXT_H
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CORE_PRETTYSTACKTRACELOCATIONCONTEXT_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CORE_PRETTYSTACKTRACELOCATIONCONTEXT_H
#include "clang/Analysis/AnalysisContext.h"
diff --git a/lib/StaticAnalyzer/Core/ProgramState.cpp b/lib/StaticAnalyzer/Core/ProgramState.cpp
index 1714a2744a1a..60b32c722ebf 100644
--- a/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -75,8 +75,8 @@ ProgramStateManager::ProgramStateManager(ASTContext &Ctx,
: Eng(SubEng), EnvMgr(alloc), GDMFactory(alloc),
svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
CallEventMgr(new CallEventManager(alloc)), Alloc(alloc) {
- StoreMgr.reset((*CreateSMgr)(*this));
- ConstraintMgr.reset((*CreateCMgr)(*this, SubEng));
+ StoreMgr = (*CreateSMgr)(*this);
+ ConstraintMgr = (*CreateCMgr)(*this, SubEng);
}
diff --git a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 77578d378fb3..170f7c02b882 100644
--- a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -328,9 +328,9 @@ private:
} // end anonymous namespace
-ConstraintManager *
+std::unique_ptr<ConstraintManager>
ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
- return new RangeConstraintManager(Eng, StMgr.getSValBuilder());
+ return llvm::make_unique<RangeConstraintManager>(Eng, StMgr.getSValBuilder());
}
const llvm::APSInt* RangeConstraintManager::getSymVal(ProgramStateRef St,
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
index 3bbbb3430792..45056226c9e0 100644
--- a/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -609,16 +609,17 @@ public: // Part of public interface to class.
// RegionStore creation.
//===----------------------------------------------------------------------===//
-StoreManager *ento::CreateRegionStoreManager(ProgramStateManager& StMgr) {
+std::unique_ptr<StoreManager>
+ento::CreateRegionStoreManager(ProgramStateManager &StMgr) {
RegionStoreFeatures F = maximal_features_tag();
- return new RegionStoreManager(StMgr, F);
+ return llvm::make_unique<RegionStoreManager>(StMgr, F);
}
-StoreManager *
+std::unique_ptr<StoreManager>
ento::CreateFieldsOnlyRegionStoreManager(ProgramStateManager &StMgr) {
RegionStoreFeatures F = minimal_features_tag();
F.enableFields(true);
- return new RegionStoreManager(StMgr, F);
+ return llvm::make_unique<RegionStoreManager>(StMgr, F);
}
@@ -708,7 +709,7 @@ public:
}
bool AddToWorkList(WorkListElement E, const ClusterBindings *C) {
- if (C && !Visited.insert(C))
+ if (C && !Visited.insert(C).second)
return false;
WL.push_back(E);
return true;
diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
index 21e2283511ae..a72d1d4f9218 100644
--- a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
+++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_GR_SIMPLE_CONSTRAINT_MANAGER_H
-#define LLVM_CLANG_GR_SIMPLE_CONSTRAINT_MANAGER_H
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CORE_SIMPLECONSTRAINTMANAGER_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CORE_SIMPLECONSTRAINTMANAGER_H
#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
diff --git a/lib/StaticAnalyzer/Core/Store.cpp b/lib/StaticAnalyzer/Core/Store.cpp
index e38be3e0b7dd..99ec1e704340 100644
--- a/lib/StaticAnalyzer/Core/Store.cpp
+++ b/lib/StaticAnalyzer/Core/Store.cpp
@@ -48,17 +48,6 @@ const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
}
-// FIXME: Merge with the implementation of the same method in MemRegion.cpp
-static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *D = RT->getDecl();
- if (!D->getDefinition())
- return false;
- }
-
- return true;
-}
-
StoreRef StoreManager::BindDefault(Store store, const MemRegion *R, SVal V) {
return StoreRef(store, *this);
}
@@ -196,7 +185,7 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
const MemRegion *newSuperR = nullptr;
// We can only compute sizeof(PointeeTy) if it is a complete type.
- if (IsCompleteType(Ctx, PointeeTy)) {
+ if (!PointeeTy->isIncompleteType()) {
// Compute the size in **bytes**.
CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
if (!pointeeTySize.isZero()) {
diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index f0dd2742352b..183ef358df63 100644
--- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
+#include "ModelInjector.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/DataRecursiveASTVisitor.h"
#include "clang/AST/Decl.h"
@@ -21,8 +22,10 @@
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CallGraph.h"
+#include "clang/Analysis/CodeInjector.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
@@ -51,7 +54,7 @@ using llvm::SmallPtrSet;
#define DEBUG_TYPE "AnalysisConsumer"
-static ExplodedNode::Auditor* CreateUbiViz();
+static std::unique_ptr<ExplodedNode::Auditor> CreateUbiViz();
STATISTIC(NumFunctionTopLevel, "The # of functions at top level.");
STATISTIC(NumFunctionsAnalyzed,
@@ -157,6 +160,7 @@ public:
const std::string OutDir;
AnalyzerOptionsRef Opts;
ArrayRef<std::string> Plugins;
+ CodeInjector *Injector;
/// \brief Stores the declarations from the local translation unit.
/// Note, we pre-compute the local declarations at parse time as an
@@ -184,9 +188,10 @@ public:
AnalysisConsumer(const Preprocessor& pp,
const std::string& outdir,
AnalyzerOptionsRef opts,
- ArrayRef<std::string> plugins)
- : RecVisitorMode(0), RecVisitorBR(nullptr),
- Ctx(nullptr), PP(pp), OutDir(outdir), Opts(opts), Plugins(plugins) {
+ ArrayRef<std::string> plugins,
+ CodeInjector *injector)
+ : RecVisitorMode(0), RecVisitorBR(nullptr), Ctx(nullptr), PP(pp),
+ OutDir(outdir), Opts(opts), Plugins(plugins), Injector(injector) {
DigestAnalyzerOptions();
if (Opts->PrintStats) {
llvm::EnableStatistics();
@@ -284,16 +289,12 @@ public:
void Initialize(ASTContext &Context) override {
Ctx = &Context;
- checkerMgr.reset(createCheckerManager(*Opts, PP.getLangOpts(), Plugins,
- PP.getDiagnostics()));
- Mgr.reset(new AnalysisManager(*Ctx,
- PP.getDiagnostics(),
- PP.getLangOpts(),
- PathConsumers,
- CreateStoreMgr,
- CreateConstraintMgr,
- checkerMgr.get(),
- *Opts));
+ checkerMgr = createCheckerManager(*Opts, PP.getLangOpts(), Plugins,
+ PP.getDiagnostics());
+
+ Mgr = llvm::make_unique<AnalysisManager>(
+ *Ctx, PP.getDiagnostics(), PP.getLangOpts(), PathConsumers,
+ CreateStoreMgr, CreateConstraintMgr, checkerMgr.get(), *Opts, Injector);
}
/// \brief Store the top level decls in the set to be processed later on.
@@ -307,7 +308,7 @@ public:
/// analyzed. This allows to redefine the default inlining policies when
/// analyzing a given function.
ExprEngine::InliningModes
- getInliningModeForFunction(const Decl *D, const SetOfConstDecls &Visited);
+ getInliningModeForFunction(const Decl *D, const SetOfConstDecls &Visited);
/// \brief Build the call graph for all the top level decls of this TU and
/// use it to define the order in which the functions should be visited.
@@ -506,6 +507,11 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred())
return;
+ // Don't analyze if the user explicitly asked for no checks to be performed
+ // on this file.
+ if (Opts->DisableAllChecks)
+ return;
+
{
if (TUTotalTimer) TUTotalTimer->startTimer();
@@ -643,7 +649,7 @@ void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
// Set the graph auditor.
std::unique_ptr<ExplodedNode::Auditor> Auditor;
if (Mgr->options.visualizeExplodedGraphWithUbiGraph) {
- Auditor.reset(CreateUbiViz());
+ Auditor = CreateUbiViz();
ExplodedNode::SetAuditor(Auditor.get());
}
@@ -687,14 +693,18 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
// AnalysisConsumer creation.
//===----------------------------------------------------------------------===//
-AnalysisASTConsumer *
-ento::CreateAnalysisConsumer(const Preprocessor &pp, const std::string &outDir,
- AnalyzerOptionsRef opts,
- ArrayRef<std::string> plugins) {
+std::unique_ptr<AnalysisASTConsumer>
+ento::CreateAnalysisConsumer(CompilerInstance &CI) {
// Disable the effects of '-Werror' when using the AnalysisConsumer.
- pp.getDiagnostics().setWarningsAsErrors(false);
+ CI.getPreprocessor().getDiagnostics().setWarningsAsErrors(false);
+
+ AnalyzerOptionsRef analyzerOpts = CI.getAnalyzerOpts();
+ bool hasModelPath = analyzerOpts->Config.count("model-path") > 0;
- return new AnalysisConsumer(pp, outDir, opts, plugins);
+ return llvm::make_unique<AnalysisConsumer>(
+ CI.getPreprocessor(), CI.getFrontendOpts().OutputFile, analyzerOpts,
+ CI.getFrontendOpts().Plugins,
+ hasModelPath ? new ModelInjector(CI) : nullptr);
}
//===----------------------------------------------------------------------===//
@@ -721,7 +731,7 @@ public:
} // end anonymous namespace
-static ExplodedNode::Auditor* CreateUbiViz() {
+static std::unique_ptr<ExplodedNode::Auditor> CreateUbiViz() {
SmallString<128> P;
int FD;
llvm::sys::fs::createTemporaryFile("llvm_ubi", "", FD, P);
@@ -729,7 +739,7 @@ static ExplodedNode::Auditor* CreateUbiViz() {
auto Stream = llvm::make_unique<llvm::raw_fd_ostream>(FD, true);
- return new UbigraphViz(std::move(Stream), P);
+ return llvm::make_unique<UbigraphViz>(std::move(Stream), P);
}
void UbigraphViz::AddEdge(ExplodedNode *Src, ExplodedNode *Dst) {
@@ -778,7 +788,9 @@ UbigraphViz::~UbigraphViz() {
Out.reset();
llvm::errs() << "Running 'ubiviz' program... ";
std::string ErrMsg;
- std::string Ubiviz = llvm::sys::FindProgramByName("ubiviz");
+ std::string Ubiviz;
+ if (auto Path = llvm::sys::findProgramByName("ubiviz"))
+ Ubiviz = *Path;
std::vector<const char*> args;
args.push_back(Ubiviz.c_str());
args.push_back(Filename.c_str());
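findProgramByName() now reports failure through llvm::ErrorOr rather than an empty string, which is why the hunk above only assigns the path when the lookup succeeds. A minimal sketch of the same check, with a hypothetical helper name:

    #include "llvm/Support/Program.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    static std::string findToolOrWarn(const char *Name) {
      if (auto Path = llvm::sys::findProgramByName(Name))
        return *Path;              // success: the ErrorOr<std::string> holds a value
      llvm::errs() << "warning: '" << Name << "' not found in PATH\n";
      return std::string();
    }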
diff --git a/lib/StaticAnalyzer/Frontend/CMakeLists.txt b/lib/StaticAnalyzer/Frontend/CMakeLists.txt
index 5349ed93e2c4..e3ca91aec9cd 100644
--- a/lib/StaticAnalyzer/Frontend/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Frontend/CMakeLists.txt
@@ -7,13 +7,16 @@ set(LLVM_LINK_COMPONENTS
add_clang_library(clangStaticAnalyzerFrontend
AnalysisConsumer.cpp
CheckerRegistration.cpp
+ ModelConsumer.cpp
FrontendActions.cpp
+ ModelInjector.cpp
LINK_LIBS
clangAST
clangAnalysis
clangBasic
clangFrontend
+ clangLex
clangStaticAnalyzerCheckers
clangStaticAnalyzerCore
)
diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index e2577c3c729f..36565cb6e2ca 100644
--- a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -99,11 +99,10 @@ void ClangCheckerRegistry::warnIncompatible(DiagnosticsEngine *diags,
<< pluginAPIVersion;
}
-
-CheckerManager *ento::createCheckerManager(AnalyzerOptions &opts,
- const LangOptions &langOpts,
- ArrayRef<std::string> plugins,
- DiagnosticsEngine &diags) {
+std::unique_ptr<CheckerManager>
+ento::createCheckerManager(AnalyzerOptions &opts, const LangOptions &langOpts,
+ ArrayRef<std::string> plugins,
+ DiagnosticsEngine &diags) {
std::unique_ptr<CheckerManager> checkerMgr(
new CheckerManager(langOpts, &opts));
@@ -118,12 +117,15 @@ CheckerManager *ento::createCheckerManager(AnalyzerOptions &opts,
checkerMgr->finishedCheckerRegistration();
for (unsigned i = 0, e = checkerOpts.size(); i != e; ++i) {
- if (checkerOpts[i].isUnclaimed())
+ if (checkerOpts[i].isUnclaimed()) {
diags.Report(diag::err_unknown_analyzer_checker)
<< checkerOpts[i].getName();
+ diags.Report(diag::note_suggest_disabling_all_checkers);
+ }
+
}
- return checkerMgr.release();
+ return std::move(checkerMgr);
}
void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins) {
diff --git a/lib/StaticAnalyzer/Frontend/FrontendActions.cpp b/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
index aa3807732632..b33608042ce3 100644
--- a/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
+++ b/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
@@ -8,16 +8,21 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
-#include "clang/Frontend/CompilerInstance.h"
#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
+#include "clang/StaticAnalyzer/Frontend/ModelConsumer.h"
using namespace clang;
using namespace ento;
-ASTConsumer *AnalysisAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
- return CreateAnalysisConsumer(CI.getPreprocessor(),
- CI.getFrontendOpts().OutputFile,
- CI.getAnalyzerOpts(),
- CI.getFrontendOpts().Plugins);
+std::unique_ptr<ASTConsumer>
+AnalysisAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return CreateAnalysisConsumer(CI);
}
+ParseModelFileAction::ParseModelFileAction(llvm::StringMap<Stmt *> &Bodies)
+ : Bodies(Bodies) {}
+
+std::unique_ptr<ASTConsumer>
+ParseModelFileAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return llvm::make_unique<ModelConsumer>(Bodies);
+}
diff --git a/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp b/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
new file mode 100644
index 000000000000..a65a5ee0a451
--- /dev/null
+++ b/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
@@ -0,0 +1,42 @@
+//===--- ModelConsumer.cpp - ASTConsumer for consuming model files --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements an ASTConsumer for consuming model files.
+///
+/// This ASTConsumer handles the AST of a parsed model file. All top level
+/// function definitions will be collected from that model file for later
+/// retrieval during the static analysis. The bodies of these functions will
+/// not be injected into the ASTUnit of the analyzed translation unit; they are
+/// made available through the BodyFarm, which is utilized by the
+/// AnalysisDeclContext class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Frontend/ModelConsumer.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclGroup.h"
+
+using namespace clang;
+using namespace ento;
+
+ModelConsumer::ModelConsumer(llvm::StringMap<Stmt *> &Bodies)
+ : Bodies(Bodies) {}
+
+bool ModelConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+
+ // Only interested in definitions.
+ const FunctionDecl *func = llvm::dyn_cast<FunctionDecl>(*I);
+ if (func && func->hasBody()) {
+ Bodies.insert(std::make_pair(func->getName(), func->getBody()));
+ }
+ }
+ return true;
+}
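ModelConsumer only records top-level function definitions; which files count as models is decided by the frontend action that feeds it. As a hedged illustration (file name and contents are hypothetical), a model file is ordinary C++ whose definition name matches the modelled function, so HandleTopLevelDecl stores its body under that name:

    // isPositive.model -- hypothetical model file located via the "model-path"
    // analyzer-config entry; parsed by ParseModelFileAction and recorded as
    // Bodies["isPositive"] for later retrieval through the BodyFarm.
    bool isPositive(int x) {
      return x > 0;  // a body just precise enough for path-sensitive analysis
    }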
diff --git a/lib/StaticAnalyzer/Frontend/ModelInjector.cpp b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
new file mode 100644
index 000000000000..63bb1e245885
--- /dev/null
+++ b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
@@ -0,0 +1,117 @@
+//===-- ModelInjector.cpp ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ModelInjector.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/FileSystem.h"
+#include <string>
+#include <utility>
+
+using namespace clang;
+using namespace ento;
+
+ModelInjector::ModelInjector(CompilerInstance &CI) : CI(CI) {}
+
+Stmt *ModelInjector::getBody(const FunctionDecl *D) {
+ onBodySynthesis(D);
+ return Bodies[D->getName()];
+}
+
+Stmt *ModelInjector::getBody(const ObjCMethodDecl *D) {
+ onBodySynthesis(D);
+ return Bodies[D->getName()];
+}
+
+void ModelInjector::onBodySynthesis(const NamedDecl *D) {
+
+ // FIXME: what about overloads? Declarations can be used as keys but what
+ // about file name index? Mangled names may not be suitable for that either.
+ if (Bodies.count(D->getName()) != 0)
+ return;
+
+ SourceManager &SM = CI.getSourceManager();
+ FileID mainFileID = SM.getMainFileID();
+
+ AnalyzerOptionsRef analyzerOpts = CI.getAnalyzerOpts();
+ llvm::StringRef modelPath = analyzerOpts->Config["model-path"];
+
+ llvm::SmallString<128> fileName;
+
+ if (!modelPath.empty())
+ fileName =
+ llvm::StringRef(modelPath.str() + "/" + D->getName().str() + ".model");
+ else
+ fileName = llvm::StringRef(D->getName().str() + ".model");
+
+ if (!llvm::sys::fs::exists(fileName.str())) {
+ Bodies[D->getName()] = nullptr;
+ return;
+ }
+
+ IntrusiveRefCntPtr<CompilerInvocation> Invocation(
+ new CompilerInvocation(CI.getInvocation()));
+
+ FrontendOptions &FrontendOpts = Invocation->getFrontendOpts();
+ InputKind IK = IK_CXX; // FIXME
+ FrontendOpts.Inputs.clear();
+ FrontendOpts.Inputs.push_back(FrontendInputFile(fileName, IK));
+ FrontendOpts.DisableFree = true;
+
+ Invocation->getDiagnosticOpts().VerifyDiagnostics = 0;
+
+ // Modules are parsed by a separate CompilerInstance, so this code mimics that
+ // behavior for models
+ CompilerInstance Instance;
+ Instance.setInvocation(&*Invocation);
+ Instance.createDiagnostics(
+ new ForwardingDiagnosticConsumer(CI.getDiagnosticClient()),
+ /*ShouldOwnClient=*/true);
+
+ Instance.getDiagnostics().setSourceManager(&SM);
+
+ Instance.setVirtualFileSystem(&CI.getVirtualFileSystem());
+
+ // The instance wants to take ownership; however, the DisableFree frontend
+ // option is set to true to avoid double-free issues.
+ Instance.setFileManager(&CI.getFileManager());
+ Instance.setSourceManager(&SM);
+ Instance.setPreprocessor(&CI.getPreprocessor());
+ Instance.setASTContext(&CI.getASTContext());
+
+ Instance.getPreprocessor().InitializeForModelFile();
+
+ ParseModelFileAction parseModelFile(Bodies);
+
+ const unsigned ThreadStackSize = 8 << 20;
+ llvm::CrashRecoveryContext CRC;
+
+ CRC.RunSafelyOnThread([&]() { Instance.ExecuteAction(parseModelFile); },
+ ThreadStackSize);
+
+ Instance.getPreprocessor().FinalizeForModelFile();
+
+ Instance.resetAndLeakSourceManager();
+ Instance.resetAndLeakFileManager();
+ Instance.resetAndLeakPreprocessor();
+
+ // The preprocessor enters the main file ID when parsing starts, so the main
+ // file ID is changed to the model file during parsing and needs to be reset
+ // to the former main file ID after parsing of the model file is done.
+ SM.setMainFileID(mainFileID);
+}
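
Given the lookup above (<model-path>/<function name>.model), a body-less function can be modelled by dropping a definition into a file named after it. The file name pattern and the Config["model-path"] key come from the code above; the concrete function and invocation below are only an assumed illustration (the analyzer would be pointed at the directory with something like -analyzer-config model-path=<dir>):

    // notzero.model -- hypothetical model for a declaration such as
    //   int notzero(int x);
    // whose body is not visible in the analyzed translation unit. The separate
    // CompilerInstance created above parses this file, and ModelConsumer records
    // the body under the name "notzero" for later injection via the BodyFarm.
    int notzero(int x) {
      if (x == 0)
        return 1;
      return x;
    }
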
diff --git a/lib/StaticAnalyzer/Frontend/ModelInjector.h b/lib/StaticAnalyzer/Frontend/ModelInjector.h
new file mode 100644
index 000000000000..fd24e32f3a3d
--- /dev/null
+++ b/lib/StaticAnalyzer/Frontend/ModelInjector.h
@@ -0,0 +1,74 @@
+//===-- ModelInjector.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines the clang::ento::ModelInjector class which
+/// implements the clang::CodeInjector interface. This class is responsible
+/// for injecting function definitions that were synthesized from model files.
+///
+/// Model files allow definitions of functions to be lazily constituted for
+/// functions which lack bodies in the original source code. This allows the
+/// analyzer to more precisely analyze code that calls such functions,
+/// analyzing the artificial definitions (which typically approximate the
+/// semantics of the called function) when called by client code. These
+/// definitions are reconstituted lazily, on-demand, by the static analyzer
+/// engine.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_FRONTEND_MODELINJECTOR_H
+#define LLVM_CLANG_SA_FRONTEND_MODELINJECTOR_H
+
+#include "clang/Analysis/CodeInjector.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/StringMap.h"
+#include <map>
+#include <memory>
+#include <vector>
+
+namespace clang {
+
+class CompilerInstance;
+class ASTUnit;
+class ASTReader;
+class NamedDecl;
+class Module;
+
+namespace ento {
+class ModelInjector : public CodeInjector {
+public:
+ ModelInjector(CompilerInstance &CI);
+ Stmt *getBody(const FunctionDecl *D);
+ Stmt *getBody(const ObjCMethodDecl *D);
+
+private:
+ /// \brief Synthesize a body for a declaration
+ ///
+ /// This method first looks up the appropriate model file based on the
+ /// model-path configuration option and the name of the declaration that is
+ /// looked up. If no model has been synthesized yet for a function with that
+ /// name, it will create a new compiler instance to parse the model file,
+ /// reusing the ASTContext, Preprocessor, and SourceManager of the original
+ /// compiler instance. These resources are shared between the two compiler
+ /// instances, so the newly created instance has to "leak" these objects,
+ /// since they are owned by the original instance.
+ ///
+ /// The model-path should be either an absolute path or relative to the
+ /// working directory of the compiler.
+ void onBodySynthesis(const NamedDecl *D);
+
+ CompilerInstance &CI;
+
+ // FIXME: double memoization is redundant, with memoization both here and in
+ // BodyFarm.
+ llvm::StringMap<Stmt *> Bodies;
+};
+}
+}
+
+#endif
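
The header describes ModelInjector as one implementation of the clang::CodeInjector interface. For orientation, a minimal sketch of another, deliberately trivial injector, assuming CodeInjector declares these two getBody overloads as virtual (which the "implements the clang::CodeInjector interface" wording above implies):

    #include "clang/Analysis/CodeInjector.h"

    namespace clang {
    namespace ento {

    // Hypothetical no-op injector: never synthesizes a body, so analysis falls
    // back to whatever definitions the translation unit itself provides.
    class NullInjector : public CodeInjector {
    public:
      Stmt *getBody(const FunctionDecl *D) { return nullptr; }
      Stmt *getBody(const ObjCMethodDecl *D) { return nullptr; }
    };

    } // namespace ento
    } // namespace clang
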
diff --git a/lib/Tooling/ArgumentsAdjusters.cpp b/lib/Tooling/ArgumentsAdjusters.cpp
index a69971e006eb..5a67db0e176f 100644
--- a/lib/Tooling/ArgumentsAdjusters.cpp
+++ b/lib/Tooling/ArgumentsAdjusters.cpp
@@ -19,39 +19,66 @@
namespace clang {
namespace tooling {
-void ArgumentsAdjuster::anchor() {
+/// Add the -fsyntax-only option to the command line arguments.
+ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
+ return [](const CommandLineArguments &Args) {
+ CommandLineArguments AdjustedArgs;
+ for (size_t i = 0, e = Args.size(); i != e; ++i) {
+ StringRef Arg = Args[i];
+ // FIXME: Remove options that generate output.
+ if (!Arg.startswith("-fcolor-diagnostics") &&
+ !Arg.startswith("-fdiagnostics-color"))
+ AdjustedArgs.push_back(Args[i]);
+ }
+ AdjustedArgs.push_back("-fsyntax-only");
+ return AdjustedArgs;
+ };
}
-/// Add -fsyntax-only option to the commnand line arguments.
-CommandLineArguments
-ClangSyntaxOnlyAdjuster::Adjust(const CommandLineArguments &Args) {
- CommandLineArguments AdjustedArgs;
- for (size_t i = 0, e = Args.size(); i != e; ++i) {
- StringRef Arg = Args[i];
- // FIXME: Remove options that generate output.
- if (!Arg.startswith("-fcolor-diagnostics") &&
- !Arg.startswith("-fdiagnostics-color"))
- AdjustedArgs.push_back(Args[i]);
- }
- AdjustedArgs.push_back("-fsyntax-only");
- return AdjustedArgs;
+ArgumentsAdjuster getClangStripOutputAdjuster() {
+ return [](const CommandLineArguments &Args) {
+ CommandLineArguments AdjustedArgs;
+ for (size_t i = 0, e = Args.size(); i < e; ++i) {
+ StringRef Arg = Args[i];
+ if (!Arg.startswith("-o"))
+ AdjustedArgs.push_back(Args[i]);
+
+ if (Arg == "-o") {
+ // Output is specified as -o foo. Skip the next argument also.
+ ++i;
+ }
+ // Else, the output is specified as -ofoo. Just do nothing.
+ }
+ return AdjustedArgs;
+ };
}
-CommandLineArguments
-ClangStripOutputAdjuster::Adjust(const CommandLineArguments &Args) {
- CommandLineArguments AdjustedArgs;
- for (size_t i = 0, e = Args.size(); i < e; ++i) {
- StringRef Arg = Args[i];
- if(!Arg.startswith("-o"))
- AdjustedArgs.push_back(Args[i]);
-
- if(Arg == "-o") {
- // Output is specified as -o foo. Skip the next argument also.
- ++i;
+ArgumentsAdjuster getInsertArgumentAdjuster(const CommandLineArguments &Extra,
+ ArgumentInsertPosition Pos) {
+ return [Extra, Pos](const CommandLineArguments &Args) {
+ CommandLineArguments Return(Args);
+
+ CommandLineArguments::iterator I;
+ if (Pos == ArgumentInsertPosition::END) {
+ I = Return.end();
+ } else {
+ I = Return.begin();
+ ++I; // To leave the program name in place
}
- // Else, the output is specified as -ofoo. Just do nothing.
- }
- return AdjustedArgs;
+
+ Return.insert(I, Extra.begin(), Extra.end());
+ return Return;
+ };
+}
+
+ArgumentsAdjuster getInsertArgumentAdjuster(const char *Extra,
+ ArgumentInsertPosition Pos) {
+ return getInsertArgumentAdjuster(CommandLineArguments(1, Extra), Pos);
+}
+
+ArgumentsAdjuster combineAdjusters(ArgumentsAdjuster First,
+ ArgumentsAdjuster Second) {
+ return std::bind(Second, std::bind(First, std::placeholders::_1));
}
} // end namespace tooling
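
With ArgumentsAdjuster now a std::function returned by factory functions, adjusters compose as ordinary callables. A small usage sketch (the two stock adjusters and combineAdjusters come from the code above; the sample command line is made up):

    #include "clang/Tooling/ArgumentsAdjusters.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang::tooling;

    int main() {
      // Strip any -o option first, then force -fsyntax-only -- the same pair
      // and order that ClangTool installs by default (see Tooling.cpp below).
      ArgumentsAdjuster Adjuster = combineAdjusters(
          getClangStripOutputAdjuster(), getClangSyntaxOnlyAdjuster());

      CommandLineArguments Args = {"clang++", "-c", "foo.cpp", "-o", "foo.o"};
      for (const std::string &Arg : Adjuster(Args))
        llvm::outs() << Arg << " "; // clang++ -c foo.cpp -fsyntax-only
      llvm::outs() << "\n";
      return 0;
    }
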
diff --git a/lib/Tooling/CMakeLists.txt b/lib/Tooling/CMakeLists.txt
index 2bf9652fa376..b5c3d54e5fc1 100644
--- a/lib/Tooling/CMakeLists.txt
+++ b/lib/Tooling/CMakeLists.txt
@@ -1,5 +1,7 @@
set(LLVM_LINK_COMPONENTS support)
+add_subdirectory(Core)
+
add_clang_library(clangTooling
ArgumentsAdjusters.cpp
CommonOptionsParser.cpp
@@ -18,4 +20,5 @@ add_clang_library(clangTooling
clangFrontend
clangLex
clangRewrite
+ clangToolingCore
)
diff --git a/lib/Tooling/CommonOptionsParser.cpp b/lib/Tooling/CommonOptionsParser.cpp
index e0b844c067d4..91c74a4c3359 100644
--- a/lib/Tooling/CommonOptionsParser.cpp
+++ b/lib/Tooling/CommonOptionsParser.cpp
@@ -25,6 +25,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/CommandLine.h"
+#include "clang/Tooling/ArgumentsAdjusters.h"
#include "clang/Tooling/CommonOptionsParser.h"
#include "clang/Tooling/Tooling.h"
@@ -53,6 +54,42 @@ const char *const CommonOptionsParser::HelpMessage =
"\tsuffix of a path in the compile command database.\n"
"\n";
+class ArgumentsAdjustingCompilations : public CompilationDatabase {
+public:
+ ArgumentsAdjustingCompilations(
+ std::unique_ptr<CompilationDatabase> Compilations)
+ : Compilations(std::move(Compilations)) {}
+
+ void appendArgumentsAdjuster(ArgumentsAdjuster Adjuster) {
+ Adjusters.push_back(Adjuster);
+ }
+
+ std::vector<CompileCommand>
+ getCompileCommands(StringRef FilePath) const override {
+ return adjustCommands(Compilations->getCompileCommands(FilePath));
+ }
+
+ std::vector<std::string> getAllFiles() const override {
+ return Compilations->getAllFiles();
+ }
+
+ std::vector<CompileCommand> getAllCompileCommands() const override {
+ return adjustCommands(Compilations->getAllCompileCommands());
+ }
+
+private:
+ std::unique_ptr<CompilationDatabase> Compilations;
+ std::vector<ArgumentsAdjuster> Adjusters;
+
+ std::vector<CompileCommand>
+ adjustCommands(std::vector<CompileCommand> Commands) const {
+ for (CompileCommand &Command : Commands)
+ for (const auto &Adjuster : Adjusters)
+ Command.CommandLine = Adjuster(Command.CommandLine);
+ return Commands;
+ }
+};
+
CommonOptionsParser::CommonOptionsParser(int &argc, const char **argv,
cl::OptionCategory &Category,
const char *Overview) {
@@ -65,6 +102,16 @@ CommonOptionsParser::CommonOptionsParser(int &argc, const char **argv,
cl::Positional, cl::desc("<source0> [... <sourceN>]"), cl::OneOrMore,
cl::cat(Category));
+ static cl::list<std::string> ArgsAfter(
+ "extra-arg",
+ cl::desc("Additional argument to append to the compiler command line"),
+ cl::cat(Category));
+
+ static cl::list<std::string> ArgsBefore(
+ "extra-arg-before",
+ cl::desc("Additional argument to prepend to the compiler command line"),
+ cl::cat(Category));
+
// Hide unrelated options.
StringMap<cl::Option*> Options;
cl::getRegisteredOptions(Options);
@@ -82,13 +129,21 @@ CommonOptionsParser::CommonOptionsParser(int &argc, const char **argv,
if (!Compilations) {
std::string ErrorMessage;
if (!BuildPath.empty()) {
- Compilations.reset(CompilationDatabase::autoDetectFromDirectory(
- BuildPath, ErrorMessage));
+ Compilations =
+ CompilationDatabase::autoDetectFromDirectory(BuildPath, ErrorMessage);
} else {
- Compilations.reset(CompilationDatabase::autoDetectFromSource(
- SourcePaths[0], ErrorMessage));
+ Compilations = CompilationDatabase::autoDetectFromSource(SourcePaths[0],
+ ErrorMessage);
}
if (!Compilations)
llvm::report_fatal_error(ErrorMessage);
}
+ auto AdjustingCompilations =
+ llvm::make_unique<ArgumentsAdjustingCompilations>(
+ std::move(Compilations));
+ AdjustingCompilations->appendArgumentsAdjuster(
+ getInsertArgumentAdjuster(ArgsBefore, ArgumentInsertPosition::BEGIN));
+ AdjustingCompilations->appendArgumentsAdjuster(
+ getInsertArgumentAdjuster(ArgsAfter, ArgumentInsertPosition::END));
+ Compilations = std::move(AdjustingCompilations);
}
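
After this change any tool built on CommonOptionsParser accepts -extra-arg=<flag> and -extra-arg-before=<flag>, which are threaded into every compile command through ArgumentsAdjustingCompilations. A minimal sketch of such a tool (the tool name, overview string, and option category are placeholders):

    #include "clang/Frontend/FrontendActions.h"
    #include "clang/Tooling/CommonOptionsParser.h"
    #include "clang/Tooling/Tooling.h"
    #include "llvm/Support/CommandLine.h"

    using namespace clang::tooling;

    static llvm::cl::OptionCategory ToolCategory("my-tool options");

    int main(int argc, const char **argv) {
      // Users of this tool can now pass -extra-arg=<flag> / -extra-arg-before=<flag>;
      // CommonOptionsParser appends/prepends them to every compile command.
      CommonOptionsParser OptionsParser(argc, argv, ToolCategory, "my-tool\n");
      ClangTool Tool(OptionsParser.getCompilations(),
                     OptionsParser.getSourcePathList());
      return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>().get());
    }
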
diff --git a/lib/Tooling/CompilationDatabase.cpp b/lib/Tooling/CompilationDatabase.cpp
index 4b776bf3c73d..7613988c96d9 100644
--- a/lib/Tooling/CompilationDatabase.cpp
+++ b/lib/Tooling/CompilationDatabase.cpp
@@ -35,7 +35,7 @@ namespace tooling {
CompilationDatabase::~CompilationDatabase() {}
-CompilationDatabase *
+std::unique_ptr<CompilationDatabase>
CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
std::string &ErrorMessage) {
std::stringstream ErrorStream;
@@ -45,17 +45,16 @@ CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
It != Ie; ++It) {
std::string DatabaseErrorMessage;
std::unique_ptr<CompilationDatabasePlugin> Plugin(It->instantiate());
- if (CompilationDatabase *DB =
- Plugin->loadFromDirectory(BuildDirectory, DatabaseErrorMessage))
+ if (std::unique_ptr<CompilationDatabase> DB =
+ Plugin->loadFromDirectory(BuildDirectory, DatabaseErrorMessage))
return DB;
- else
- ErrorStream << It->getName() << ": " << DatabaseErrorMessage << "\n";
+ ErrorStream << It->getName() << ": " << DatabaseErrorMessage << "\n";
}
ErrorMessage = ErrorStream.str();
return nullptr;
}
-static CompilationDatabase *
+static std::unique_ptr<CompilationDatabase>
findCompilationDatabaseFromDirectory(StringRef Directory,
std::string &ErrorMessage) {
std::stringstream ErrorStream;
@@ -63,8 +62,8 @@ findCompilationDatabaseFromDirectory(StringRef Directory,
while (!Directory.empty()) {
std::string LoadErrorMessage;
- if (CompilationDatabase *DB =
- CompilationDatabase::loadFromDirectory(Directory, LoadErrorMessage))
+ if (std::unique_ptr<CompilationDatabase> DB =
+ CompilationDatabase::loadFromDirectory(Directory, LoadErrorMessage))
return DB;
if (!HasErrorMessage) {
@@ -79,14 +78,14 @@ findCompilationDatabaseFromDirectory(StringRef Directory,
return nullptr;
}
-CompilationDatabase *
+std::unique_ptr<CompilationDatabase>
CompilationDatabase::autoDetectFromSource(StringRef SourceFile,
std::string &ErrorMessage) {
SmallString<1024> AbsolutePath(getAbsolutePath(SourceFile));
StringRef Directory = llvm::sys::path::parent_path(AbsolutePath);
- CompilationDatabase *DB = findCompilationDatabaseFromDirectory(Directory,
- ErrorMessage);
+ std::unique_ptr<CompilationDatabase> DB =
+ findCompilationDatabaseFromDirectory(Directory, ErrorMessage);
if (!DB)
ErrorMessage = ("Could not auto-detect compilation database for file \"" +
@@ -94,13 +93,13 @@ CompilationDatabase::autoDetectFromSource(StringRef SourceFile,
return DB;
}
-CompilationDatabase *
+std::unique_ptr<CompilationDatabase>
CompilationDatabase::autoDetectFromDirectory(StringRef SourceDir,
std::string &ErrorMessage) {
SmallString<1024> AbsolutePath(getAbsolutePath(SourceDir));
- CompilationDatabase *DB = findCompilationDatabaseFromDirectory(AbsolutePath,
- ErrorMessage);
+ std::unique_ptr<CompilationDatabase> DB =
+ findCompilationDatabaseFromDirectory(AbsolutePath, ErrorMessage);
if (!DB)
ErrorMessage = ("Could not auto-detect compilation database from directory \"" +
@@ -250,14 +249,13 @@ static bool stripPositionalArgs(std::vector<const char *> Args,
CompileJobAnalyzer CompileAnalyzer;
- for (driver::JobList::const_iterator I = Jobs.begin(), E = Jobs.end(); I != E;
- ++I) {
- if ((*I)->getKind() == driver::Job::CommandClass) {
- const driver::Command *Cmd = cast<driver::Command>(*I);
+ for (const auto &Job : Jobs) {
+ if (Job.getKind() == driver::Job::CommandClass) {
+ const driver::Command &Cmd = cast<driver::Command>(Job);
// Collect only for Assemble jobs. If we do all jobs we get duplicates
// since Link jobs point to Assemble jobs as inputs.
- if (Cmd->getSource().getKind() == driver::Action::AssembleJobClass)
- CompileAnalyzer.run(&Cmd->getSource());
+ if (Cmd.getSource().getKind() == driver::Action::AssembleJobClass)
+ CompileAnalyzer.run(&Cmd.getSource());
}
}
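
loadFromDirectory and the two autoDetect entry points now hand ownership to the caller as a std::unique_ptr rather than a raw pointer. A short caller-side sketch (the helper name is made up):

    #include "clang/Tooling/CompilationDatabase.h"
    #include "llvm/Support/raw_ostream.h"
    #include <memory>
    #include <string>

    using namespace clang::tooling;

    // Hypothetical helper: ownership now travels through std::unique_ptr, so
    // there is no raw pointer for the caller to remember to delete.
    static std::unique_ptr<CompilationDatabase>
    loadDatabaseOrReport(const std::string &BuildDir) {
      std::string ErrorMessage;
      std::unique_ptr<CompilationDatabase> DB =
          CompilationDatabase::autoDetectFromDirectory(BuildDir, ErrorMessage);
      if (!DB)
        llvm::errs() << ErrorMessage << "\n";
      return DB;
    }
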
diff --git a/lib/Tooling/Core/CMakeLists.txt b/lib/Tooling/Core/CMakeLists.txt
new file mode 100644
index 000000000000..c8c75f95f3cb
--- /dev/null
+++ b/lib/Tooling/Core/CMakeLists.txt
@@ -0,0 +1,10 @@
+set(LLVM_LINK_COMPONENTS support)
+
+add_clang_library(clangToolingCore
+ Replacement.cpp
+
+ LINK_LIBS
+ clangBasic
+ clangLex
+ clangRewrite
+ )
diff --git a/lib/Tooling/Core/Makefile b/lib/Tooling/Core/Makefile
new file mode 100644
index 000000000000..366466c192e4
--- /dev/null
+++ b/lib/Tooling/Core/Makefile
@@ -0,0 +1,13 @@
+##===- clang/lib/Tooling/Core/Makefile ---------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../../..
+LIBRARYNAME := clangToolingCore
+
+include $(CLANG_LEVEL)/Makefile
diff --git a/lib/Tooling/Core/Replacement.cpp b/lib/Tooling/Core/Replacement.cpp
new file mode 100644
index 000000000000..525f7dfa91c6
--- /dev/null
+++ b/lib/Tooling/Core/Replacement.cpp
@@ -0,0 +1,289 @@
+//===--- Replacement.cpp - Framework for clang refactoring tools ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements classes to support/store refactorings.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/Tooling/Core/Replacement.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_os_ostream.h"
+
+namespace clang {
+namespace tooling {
+
+static const char * const InvalidLocation = "";
+
+Replacement::Replacement()
+ : FilePath(InvalidLocation) {}
+
+Replacement::Replacement(StringRef FilePath, unsigned Offset, unsigned Length,
+ StringRef ReplacementText)
+ : FilePath(FilePath), ReplacementRange(Offset, Length),
+ ReplacementText(ReplacementText) {}
+
+Replacement::Replacement(const SourceManager &Sources, SourceLocation Start,
+ unsigned Length, StringRef ReplacementText) {
+ setFromSourceLocation(Sources, Start, Length, ReplacementText);
+}
+
+Replacement::Replacement(const SourceManager &Sources,
+ const CharSourceRange &Range,
+ StringRef ReplacementText) {
+ setFromSourceRange(Sources, Range, ReplacementText);
+}
+
+bool Replacement::isApplicable() const {
+ return FilePath != InvalidLocation;
+}
+
+bool Replacement::apply(Rewriter &Rewrite) const {
+ SourceManager &SM = Rewrite.getSourceMgr();
+ const FileEntry *Entry = SM.getFileManager().getFile(FilePath);
+ if (!Entry)
+ return false;
+ FileID ID;
+ // FIXME: Use SM.translateFile directly.
+ SourceLocation Location = SM.translateFileLineCol(Entry, 1, 1);
+ ID = Location.isValid() ?
+ SM.getFileID(Location) :
+ SM.createFileID(Entry, SourceLocation(), SrcMgr::C_User);
+ // FIXME: We cannot check whether Offset + Length is in the file, as
+ // the remapping API is not public in the RewriteBuffer.
+ const SourceLocation Start =
+ SM.getLocForStartOfFile(ID).
+ getLocWithOffset(ReplacementRange.getOffset());
+ // ReplaceText returns false on success.
+ // ReplaceText only fails if the source location is not a file location, in
+ // which case we already returned false earlier.
+ bool RewriteSucceeded = !Rewrite.ReplaceText(
+ Start, ReplacementRange.getLength(), ReplacementText);
+ assert(RewriteSucceeded);
+ return RewriteSucceeded;
+}
+
+std::string Replacement::toString() const {
+ std::string result;
+ llvm::raw_string_ostream stream(result);
+ stream << FilePath << ": " << ReplacementRange.getOffset() << ":+"
+ << ReplacementRange.getLength() << ":\"" << ReplacementText << "\"";
+ return result;
+}
+
+bool operator<(const Replacement &LHS, const Replacement &RHS) {
+ if (LHS.getOffset() != RHS.getOffset())
+ return LHS.getOffset() < RHS.getOffset();
+ if (LHS.getLength() != RHS.getLength())
+ return LHS.getLength() < RHS.getLength();
+ if (LHS.getFilePath() != RHS.getFilePath())
+ return LHS.getFilePath() < RHS.getFilePath();
+ return LHS.getReplacementText() < RHS.getReplacementText();
+}
+
+bool operator==(const Replacement &LHS, const Replacement &RHS) {
+ return LHS.getOffset() == RHS.getOffset() &&
+ LHS.getLength() == RHS.getLength() &&
+ LHS.getFilePath() == RHS.getFilePath() &&
+ LHS.getReplacementText() == RHS.getReplacementText();
+}
+
+void Replacement::setFromSourceLocation(const SourceManager &Sources,
+ SourceLocation Start, unsigned Length,
+ StringRef ReplacementText) {
+ const std::pair<FileID, unsigned> DecomposedLocation =
+ Sources.getDecomposedLoc(Start);
+ const FileEntry *Entry = Sources.getFileEntryForID(DecomposedLocation.first);
+ if (Entry) {
+ // Make FilePath absolute so replacements can be applied correctly when
+ // relative paths for files are used.
+ llvm::SmallString<256> FilePath(Entry->getName());
+ std::error_code EC = llvm::sys::fs::make_absolute(FilePath);
+ this->FilePath = EC ? FilePath.c_str() : Entry->getName();
+ } else {
+ this->FilePath = InvalidLocation;
+ }
+ this->ReplacementRange = Range(DecomposedLocation.second, Length);
+ this->ReplacementText = ReplacementText;
+}
+
+// FIXME: This should go into the Lexer, but we need to figure out how
+// to handle ranges for refactoring in general first - there is no obvious
+// good way to integrate this into the Lexer yet.
+static int getRangeSize(const SourceManager &Sources,
+ const CharSourceRange &Range) {
+ SourceLocation SpellingBegin = Sources.getSpellingLoc(Range.getBegin());
+ SourceLocation SpellingEnd = Sources.getSpellingLoc(Range.getEnd());
+ std::pair<FileID, unsigned> Start = Sources.getDecomposedLoc(SpellingBegin);
+ std::pair<FileID, unsigned> End = Sources.getDecomposedLoc(SpellingEnd);
+ if (Start.first != End.first) return -1;
+ if (Range.isTokenRange())
+ End.second += Lexer::MeasureTokenLength(SpellingEnd, Sources,
+ LangOptions());
+ return End.second - Start.second;
+}
+
+void Replacement::setFromSourceRange(const SourceManager &Sources,
+ const CharSourceRange &Range,
+ StringRef ReplacementText) {
+ setFromSourceLocation(Sources, Sources.getSpellingLoc(Range.getBegin()),
+ getRangeSize(Sources, Range), ReplacementText);
+}
+
+unsigned shiftedCodePosition(const Replacements &Replaces, unsigned Position) {
+ unsigned NewPosition = Position;
+ for (Replacements::iterator I = Replaces.begin(), E = Replaces.end(); I != E;
+ ++I) {
+ if (I->getOffset() >= Position)
+ break;
+ if (I->getOffset() + I->getLength() > Position)
+ NewPosition += I->getOffset() + I->getLength() - Position;
+ NewPosition += I->getReplacementText().size() - I->getLength();
+ }
+ return NewPosition;
+}
+
+// FIXME: Remove this function when Replacements is implemented as std::vector
+// instead of std::set.
+unsigned shiftedCodePosition(const std::vector<Replacement> &Replaces,
+ unsigned Position) {
+ unsigned NewPosition = Position;
+ for (std::vector<Replacement>::const_iterator I = Replaces.begin(),
+ E = Replaces.end();
+ I != E; ++I) {
+ if (I->getOffset() >= Position)
+ break;
+ if (I->getOffset() + I->getLength() > Position)
+ NewPosition += I->getOffset() + I->getLength() - Position;
+ NewPosition += I->getReplacementText().size() - I->getLength();
+ }
+ return NewPosition;
+}
+
+void deduplicate(std::vector<Replacement> &Replaces,
+ std::vector<Range> &Conflicts) {
+ if (Replaces.empty())
+ return;
+
+ auto LessNoPath = [](const Replacement &LHS, const Replacement &RHS) {
+ if (LHS.getOffset() != RHS.getOffset())
+ return LHS.getOffset() < RHS.getOffset();
+ if (LHS.getLength() != RHS.getLength())
+ return LHS.getLength() < RHS.getLength();
+ return LHS.getReplacementText() < RHS.getReplacementText();
+ };
+
+ auto EqualNoPath = [](const Replacement &LHS, const Replacement &RHS) {
+ return LHS.getOffset() == RHS.getOffset() &&
+ LHS.getLength() == RHS.getLength() &&
+ LHS.getReplacementText() == RHS.getReplacementText();
+ };
+
+ // Deduplicate. We don't want to deduplicate based on the path as we assume
+ // that all replacements refer to the same file (or are symlinks).
+ std::sort(Replaces.begin(), Replaces.end(), LessNoPath);
+ Replaces.erase(std::unique(Replaces.begin(), Replaces.end(), EqualNoPath),
+ Replaces.end());
+
+ // Detect conflicts
+ Range ConflictRange(Replaces.front().getOffset(),
+ Replaces.front().getLength());
+ unsigned ConflictStart = 0;
+ unsigned ConflictLength = 1;
+ for (unsigned i = 1; i < Replaces.size(); ++i) {
+ Range Current(Replaces[i].getOffset(), Replaces[i].getLength());
+ if (ConflictRange.overlapsWith(Current)) {
+ // Extend conflicted range
+ ConflictRange = Range(ConflictRange.getOffset(),
+ std::max(ConflictRange.getLength(),
+ Current.getOffset() + Current.getLength() -
+ ConflictRange.getOffset()));
+ ++ConflictLength;
+ } else {
+ if (ConflictLength > 1)
+ Conflicts.push_back(Range(ConflictStart, ConflictLength));
+ ConflictRange = Current;
+ ConflictStart = i;
+ ConflictLength = 1;
+ }
+ }
+
+ if (ConflictLength > 1)
+ Conflicts.push_back(Range(ConflictStart, ConflictLength));
+}
+
+bool applyAllReplacements(const Replacements &Replaces, Rewriter &Rewrite) {
+ bool Result = true;
+ for (Replacements::const_iterator I = Replaces.begin(),
+ E = Replaces.end();
+ I != E; ++I) {
+ if (I->isApplicable()) {
+ Result = I->apply(Rewrite) && Result;
+ } else {
+ Result = false;
+ }
+ }
+ return Result;
+}
+
+// FIXME: Remove this function when Replacements is implemented as std::vector
+// instead of std::set.
+bool applyAllReplacements(const std::vector<Replacement> &Replaces,
+ Rewriter &Rewrite) {
+ bool Result = true;
+ for (std::vector<Replacement>::const_iterator I = Replaces.begin(),
+ E = Replaces.end();
+ I != E; ++I) {
+ if (I->isApplicable()) {
+ Result = I->apply(Rewrite) && Result;
+ } else {
+ Result = false;
+ }
+ }
+ return Result;
+}
+
+std::string applyAllReplacements(StringRef Code, const Replacements &Replaces) {
+ FileManager Files((FileSystemOptions()));
+ DiagnosticsEngine Diagnostics(
+ IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
+ new DiagnosticOptions);
+ SourceManager SourceMgr(Diagnostics, Files);
+ Rewriter Rewrite(SourceMgr, LangOptions());
+ std::unique_ptr<llvm::MemoryBuffer> Buf =
+ llvm::MemoryBuffer::getMemBuffer(Code, "<stdin>");
+ const clang::FileEntry *Entry =
+ Files.getVirtualFile("<stdin>", Buf->getBufferSize(), 0);
+ SourceMgr.overrideFileContents(Entry, std::move(Buf));
+ FileID ID =
+ SourceMgr.createFileID(Entry, SourceLocation(), clang::SrcMgr::C_User);
+ for (Replacements::const_iterator I = Replaces.begin(), E = Replaces.end();
+ I != E; ++I) {
+ Replacement Replace("<stdin>", I->getOffset(), I->getLength(),
+ I->getReplacementText());
+ if (!Replace.apply(Rewrite))
+ return "";
+ }
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ Rewrite.getEditBuffer(ID).write(OS);
+ OS.flush();
+ return Result;
+}
+
+} // end namespace tooling
+} // end namespace clang
+
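The string-based applyAllReplacements overload above gives an easy way to exercise the relocated code. A small sketch (the snippet and offsets are made up; offset 4 with length 3 covers "foo" in the input):

    #include "clang/Tooling/Core/Replacement.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang::tooling;

    int main() {
      // Replacements is an ordered set of Replacement objects; the "<stdin>"
      // path matches the virtual buffer that applyAllReplacements() creates.
      Replacements Replaces;
      Replaces.insert(Replacement("<stdin>", /*Offset=*/4, /*Length=*/3, "bar"));

      std::string Result = applyAllReplacements("int foo = 0;", Replaces);
      llvm::outs() << Result << "\n"; // int bar = 0;
      return 0;
    }
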
diff --git a/lib/Tooling/JSONCompilationDatabase.cpp b/lib/Tooling/JSONCompilationDatabase.cpp
index 8b8bd293851f..3b5f7e28d02d 100644
--- a/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/lib/Tooling/JSONCompilationDatabase.cpp
@@ -118,15 +118,15 @@ std::vector<std::string> unescapeCommandLine(
}
class JSONCompilationDatabasePlugin : public CompilationDatabasePlugin {
- CompilationDatabase *loadFromDirectory(StringRef Directory,
- std::string &ErrorMessage) override {
+ std::unique_ptr<CompilationDatabase>
+ loadFromDirectory(StringRef Directory, std::string &ErrorMessage) override {
SmallString<1024> JSONDatabasePath(Directory);
llvm::sys::path::append(JSONDatabasePath, "compile_commands.json");
std::unique_ptr<CompilationDatabase> Database(
JSONCompilationDatabase::loadFromFile(JSONDatabasePath, ErrorMessage));
if (!Database)
return nullptr;
- return Database.release();
+ return Database;
}
};
@@ -141,7 +141,7 @@ X("json-compilation-database", "Reads JSON formatted compilation databases");
// and thus register the JSONCompilationDatabasePlugin.
volatile int JSONAnchorSource = 0;
-JSONCompilationDatabase *
+std::unique_ptr<JSONCompilationDatabase>
JSONCompilationDatabase::loadFromFile(StringRef FilePath,
std::string &ErrorMessage) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> DatabaseBuffer =
@@ -151,22 +151,22 @@ JSONCompilationDatabase::loadFromFile(StringRef FilePath,
return nullptr;
}
std::unique_ptr<JSONCompilationDatabase> Database(
- new JSONCompilationDatabase(DatabaseBuffer->release()));
+ new JSONCompilationDatabase(std::move(*DatabaseBuffer)));
if (!Database->parse(ErrorMessage))
return nullptr;
- return Database.release();
+ return Database;
}
-JSONCompilationDatabase *
+std::unique_ptr<JSONCompilationDatabase>
JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString,
std::string &ErrorMessage) {
std::unique_ptr<llvm::MemoryBuffer> DatabaseBuffer(
llvm::MemoryBuffer::getMemBuffer(DatabaseString));
std::unique_ptr<JSONCompilationDatabase> Database(
- new JSONCompilationDatabase(DatabaseBuffer.release()));
+ new JSONCompilationDatabase(std::move(DatabaseBuffer)));
if (!Database->parse(ErrorMessage))
return nullptr;
- return Database.release();
+ return Database;
}
std::vector<CompileCommand>
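
loadFromFile and loadFromBuffer likewise return a std::unique_ptr now. A caller-side sketch with an in-memory database (the JSON contents are illustrative only):

    #include "clang/Tooling/JSONCompilationDatabase.h"
    #include "llvm/Support/raw_ostream.h"
    #include <memory>
    #include <string>

    using namespace clang::tooling;

    int main() {
      const char *JSON = R"([{
        "directory": "/tmp",
        "command": "clang++ -c foo.cpp",
        "file": "foo.cpp"
      }])";

      std::string ErrorMessage;
      std::unique_ptr<JSONCompilationDatabase> DB =
          JSONCompilationDatabase::loadFromBuffer(JSON, ErrorMessage);
      if (!DB) {
        llvm::errs() << ErrorMessage << "\n";
        return 1;
      }
      for (const CompileCommand &CC : DB->getAllCompileCommands())
        llvm::outs() << CC.Directory << ": " << CC.CommandLine.size() << " args\n";
      return 0;
    }
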
diff --git a/lib/Tooling/Makefile b/lib/Tooling/Makefile
index 0d2e7a29bcf9..7ea85a8908d6 100644
--- a/lib/Tooling/Makefile
+++ b/lib/Tooling/Makefile
@@ -9,5 +9,6 @@
CLANG_LEVEL := ../..
LIBRARYNAME := clangTooling
+PARALLEL_DIRS := Core
include $(CLANG_LEVEL)/Makefile
diff --git a/lib/Tooling/Refactoring.cpp b/lib/Tooling/Refactoring.cpp
index c96b8c92a905..c8173060d8c1 100644
--- a/lib/Tooling/Refactoring.cpp
+++ b/lib/Tooling/Refactoring.cpp
@@ -25,252 +25,6 @@
namespace clang {
namespace tooling {
-static const char * const InvalidLocation = "";
-
-Replacement::Replacement()
- : FilePath(InvalidLocation) {}
-
-Replacement::Replacement(StringRef FilePath, unsigned Offset, unsigned Length,
- StringRef ReplacementText)
- : FilePath(FilePath), ReplacementRange(Offset, Length),
- ReplacementText(ReplacementText) {}
-
-Replacement::Replacement(const SourceManager &Sources, SourceLocation Start,
- unsigned Length, StringRef ReplacementText) {
- setFromSourceLocation(Sources, Start, Length, ReplacementText);
-}
-
-Replacement::Replacement(const SourceManager &Sources,
- const CharSourceRange &Range,
- StringRef ReplacementText) {
- setFromSourceRange(Sources, Range, ReplacementText);
-}
-
-bool Replacement::isApplicable() const {
- return FilePath != InvalidLocation;
-}
-
-bool Replacement::apply(Rewriter &Rewrite) const {
- SourceManager &SM = Rewrite.getSourceMgr();
- const FileEntry *Entry = SM.getFileManager().getFile(FilePath);
- if (!Entry)
- return false;
- FileID ID;
- // FIXME: Use SM.translateFile directly.
- SourceLocation Location = SM.translateFileLineCol(Entry, 1, 1);
- ID = Location.isValid() ?
- SM.getFileID(Location) :
- SM.createFileID(Entry, SourceLocation(), SrcMgr::C_User);
- // FIXME: We cannot check whether Offset + Length is in the file, as
- // the remapping API is not public in the RewriteBuffer.
- const SourceLocation Start =
- SM.getLocForStartOfFile(ID).
- getLocWithOffset(ReplacementRange.getOffset());
- // ReplaceText returns false on success.
- // ReplaceText only fails if the source location is not a file location, in
- // which case we already returned false earlier.
- bool RewriteSucceeded = !Rewrite.ReplaceText(
- Start, ReplacementRange.getLength(), ReplacementText);
- assert(RewriteSucceeded);
- return RewriteSucceeded;
-}
-
-std::string Replacement::toString() const {
- std::string result;
- llvm::raw_string_ostream stream(result);
- stream << FilePath << ": " << ReplacementRange.getOffset() << ":+"
- << ReplacementRange.getLength() << ":\"" << ReplacementText << "\"";
- return result;
-}
-
-bool operator<(const Replacement &LHS, const Replacement &RHS) {
- if (LHS.getOffset() != RHS.getOffset())
- return LHS.getOffset() < RHS.getOffset();
- if (LHS.getLength() != RHS.getLength())
- return LHS.getLength() < RHS.getLength();
- if (LHS.getFilePath() != RHS.getFilePath())
- return LHS.getFilePath() < RHS.getFilePath();
- return LHS.getReplacementText() < RHS.getReplacementText();
-}
-
-bool operator==(const Replacement &LHS, const Replacement &RHS) {
- return LHS.getOffset() == RHS.getOffset() &&
- LHS.getLength() == RHS.getLength() &&
- LHS.getFilePath() == RHS.getFilePath() &&
- LHS.getReplacementText() == RHS.getReplacementText();
-}
-
-void Replacement::setFromSourceLocation(const SourceManager &Sources,
- SourceLocation Start, unsigned Length,
- StringRef ReplacementText) {
- const std::pair<FileID, unsigned> DecomposedLocation =
- Sources.getDecomposedLoc(Start);
- const FileEntry *Entry = Sources.getFileEntryForID(DecomposedLocation.first);
- if (Entry) {
- // Make FilePath absolute so replacements can be applied correctly when
- // relative paths for files are used.
- llvm::SmallString<256> FilePath(Entry->getName());
- std::error_code EC = llvm::sys::fs::make_absolute(FilePath);
- this->FilePath = EC ? FilePath.c_str() : Entry->getName();
- } else {
- this->FilePath = InvalidLocation;
- }
- this->ReplacementRange = Range(DecomposedLocation.second, Length);
- this->ReplacementText = ReplacementText;
-}
-
-// FIXME: This should go into the Lexer, but we need to figure out how
-// to handle ranges for refactoring in general first - there is no obvious
-// good way how to integrate this into the Lexer yet.
-static int getRangeSize(const SourceManager &Sources,
- const CharSourceRange &Range) {
- SourceLocation SpellingBegin = Sources.getSpellingLoc(Range.getBegin());
- SourceLocation SpellingEnd = Sources.getSpellingLoc(Range.getEnd());
- std::pair<FileID, unsigned> Start = Sources.getDecomposedLoc(SpellingBegin);
- std::pair<FileID, unsigned> End = Sources.getDecomposedLoc(SpellingEnd);
- if (Start.first != End.first) return -1;
- if (Range.isTokenRange())
- End.second += Lexer::MeasureTokenLength(SpellingEnd, Sources,
- LangOptions());
- return End.second - Start.second;
-}
-
-void Replacement::setFromSourceRange(const SourceManager &Sources,
- const CharSourceRange &Range,
- StringRef ReplacementText) {
- setFromSourceLocation(Sources, Sources.getSpellingLoc(Range.getBegin()),
- getRangeSize(Sources, Range), ReplacementText);
-}
-
-bool applyAllReplacements(const Replacements &Replaces, Rewriter &Rewrite) {
- bool Result = true;
- for (Replacements::const_iterator I = Replaces.begin(),
- E = Replaces.end();
- I != E; ++I) {
- if (I->isApplicable()) {
- Result = I->apply(Rewrite) && Result;
- } else {
- Result = false;
- }
- }
- return Result;
-}
-
-// FIXME: Remove this function when Replacements is implemented as std::vector
-// instead of std::set.
-bool applyAllReplacements(const std::vector<Replacement> &Replaces,
- Rewriter &Rewrite) {
- bool Result = true;
- for (std::vector<Replacement>::const_iterator I = Replaces.begin(),
- E = Replaces.end();
- I != E; ++I) {
- if (I->isApplicable()) {
- Result = I->apply(Rewrite) && Result;
- } else {
- Result = false;
- }
- }
- return Result;
-}
-
-std::string applyAllReplacements(StringRef Code, const Replacements &Replaces) {
- FileManager Files((FileSystemOptions()));
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
- new DiagnosticOptions);
- Diagnostics.setClient(new TextDiagnosticPrinter(
- llvm::outs(), &Diagnostics.getDiagnosticOptions()));
- SourceManager SourceMgr(Diagnostics, Files);
- Rewriter Rewrite(SourceMgr, LangOptions());
- llvm::MemoryBuffer *Buf = llvm::MemoryBuffer::getMemBuffer(Code, "<stdin>");
- const clang::FileEntry *Entry =
- Files.getVirtualFile("<stdin>", Buf->getBufferSize(), 0);
- SourceMgr.overrideFileContents(Entry, Buf);
- FileID ID =
- SourceMgr.createFileID(Entry, SourceLocation(), clang::SrcMgr::C_User);
- for (Replacements::const_iterator I = Replaces.begin(), E = Replaces.end();
- I != E; ++I) {
- Replacement Replace("<stdin>", I->getOffset(), I->getLength(),
- I->getReplacementText());
- if (!Replace.apply(Rewrite))
- return "";
- }
- std::string Result;
- llvm::raw_string_ostream OS(Result);
- Rewrite.getEditBuffer(ID).write(OS);
- OS.flush();
- return Result;
-}
-
-unsigned shiftedCodePosition(const Replacements &Replaces, unsigned Position) {
- unsigned NewPosition = Position;
- for (Replacements::iterator I = Replaces.begin(), E = Replaces.end(); I != E;
- ++I) {
- if (I->getOffset() >= Position)
- break;
- if (I->getOffset() + I->getLength() > Position)
- NewPosition += I->getOffset() + I->getLength() - Position;
- NewPosition += I->getReplacementText().size() - I->getLength();
- }
- return NewPosition;
-}
-
-// FIXME: Remove this function when Replacements is implemented as std::vector
-// instead of std::set.
-unsigned shiftedCodePosition(const std::vector<Replacement> &Replaces,
- unsigned Position) {
- unsigned NewPosition = Position;
- for (std::vector<Replacement>::const_iterator I = Replaces.begin(),
- E = Replaces.end();
- I != E; ++I) {
- if (I->getOffset() >= Position)
- break;
- if (I->getOffset() + I->getLength() > Position)
- NewPosition += I->getOffset() + I->getLength() - Position;
- NewPosition += I->getReplacementText().size() - I->getLength();
- }
- return NewPosition;
-}
-
-void deduplicate(std::vector<Replacement> &Replaces,
- std::vector<Range> &Conflicts) {
- if (Replaces.empty())
- return;
-
- // Deduplicate
- std::sort(Replaces.begin(), Replaces.end());
- std::vector<Replacement>::iterator End =
- std::unique(Replaces.begin(), Replaces.end());
- Replaces.erase(End, Replaces.end());
-
- // Detect conflicts
- Range ConflictRange(Replaces.front().getOffset(),
- Replaces.front().getLength());
- unsigned ConflictStart = 0;
- unsigned ConflictLength = 1;
- for (unsigned i = 1; i < Replaces.size(); ++i) {
- Range Current(Replaces[i].getOffset(), Replaces[i].getLength());
- if (ConflictRange.overlapsWith(Current)) {
- // Extend conflicted range
- ConflictRange = Range(ConflictRange.getOffset(),
- std::max(ConflictRange.getLength(),
- Current.getOffset() + Current.getLength() -
- ConflictRange.getOffset()));
- ++ConflictLength;
- } else {
- if (ConflictLength > 1)
- Conflicts.push_back(Range(ConflictStart, ConflictLength));
- ConflictRange = Current;
- ConflictStart = i;
- ConflictLength = 1;
- }
- }
-
- if (ConflictLength > 1)
- Conflicts.push_back(Range(ConflictStart, ConflictLength));
-}
-
-
RefactoringTool::RefactoringTool(const CompilationDatabase &Compilations,
ArrayRef<std::string> SourcePaths)
: ClangTool(Compilations, SourcePaths) {}
diff --git a/lib/Tooling/Tooling.cpp b/lib/Tooling/Tooling.cpp
index 0db38db3c91e..60371fb5e0e6 100644
--- a/lib/Tooling/Tooling.cpp
+++ b/lib/Tooling/Tooling.cpp
@@ -79,14 +79,14 @@ static const llvm::opt::ArgStringList *getCC1Arguments(
}
// The one job we find should be to invoke clang again.
- const clang::driver::Command *Cmd =
+ const clang::driver::Command &Cmd =
cast<clang::driver::Command>(*Jobs.begin());
- if (StringRef(Cmd->getCreator().getName()) != "clang") {
+ if (StringRef(Cmd.getCreator().getName()) != "clang") {
Diagnostics->Report(clang::diag::err_fe_expected_clang_command);
return nullptr;
}
- return &Cmd->getArguments();
+ return &Cmd.getArguments();
}
/// \brief Returns a clang build invocation initialized from the CC1 flags.
@@ -123,17 +123,25 @@ getSyntaxOnlyToolArgs(const std::vector<std::string> &ExtraArgs,
bool runToolOnCodeWithArgs(clang::FrontendAction *ToolAction, const Twine &Code,
const std::vector<std::string> &Args,
- const Twine &FileName) {
+ const Twine &FileName,
+ const FileContentMappings &VirtualMappedFiles) {
+
SmallString<16> FileNameStorage;
StringRef FileNameRef = FileName.toNullTerminatedStringRef(FileNameStorage);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions()));
- ToolInvocation Invocation(getSyntaxOnlyToolArgs(Args, FileNameRef), ToolAction,
- Files.get());
+ ToolInvocation Invocation(getSyntaxOnlyToolArgs(Args, FileNameRef),
+ ToolAction, Files.get());
SmallString<1024> CodeStorage;
Invocation.mapVirtualFile(FileNameRef,
Code.toNullTerminatedStringRef(CodeStorage));
+
+ for (auto &FilenameWithContent : VirtualMappedFiles) {
+ Invocation.mapVirtualFile(FilenameWithContent.first,
+ FilenameWithContent.second);
+ }
+
return Invocation.run();
}
@@ -186,10 +194,6 @@ ToolInvocation::~ToolInvocation() {
delete Action;
}
-void ToolInvocation::setDiagnosticConsumer(DiagnosticConsumer *D) {
- DiagConsumer = D;
-}
-
void ToolInvocation::mapVirtualFile(StringRef FilePath, StringRef Content) {
SmallString<1024> PathStorage;
llvm::sys::path::native(FilePath, PathStorage);
@@ -223,8 +227,10 @@ bool ToolInvocation::run() {
newInvocation(&Diagnostics, *CC1Args));
for (const auto &It : MappedFileContents) {
// Inject the code as the given file name into the preprocessor options.
- auto *Input = llvm::MemoryBuffer::getMemBuffer(It.getValue());
- Invocation->getPreprocessorOpts().addRemappedFile(It.getKey(), Input);
+ std::unique_ptr<llvm::MemoryBuffer> Input =
+ llvm::MemoryBuffer::getMemBuffer(It.getValue());
+ Invocation->getPreprocessorOpts().addRemappedFile(It.getKey(),
+ Input.release());
}
return runInvocation(BinaryName, Compilation.get(), Invocation.release());
}
@@ -271,51 +277,27 @@ bool FrontendActionFactory::runInvocation(CompilerInvocation *Invocation,
ClangTool::ClangTool(const CompilationDatabase &Compilations,
ArrayRef<std::string> SourcePaths)
- : Files(new FileManager(FileSystemOptions())), DiagConsumer(nullptr) {
- ArgsAdjusters.push_back(new ClangStripOutputAdjuster());
- ArgsAdjusters.push_back(new ClangSyntaxOnlyAdjuster());
- for (const auto &SourcePath : SourcePaths) {
- std::string File(getAbsolutePath(SourcePath));
-
- std::vector<CompileCommand> CompileCommandsForFile =
- Compilations.getCompileCommands(File);
- if (!CompileCommandsForFile.empty()) {
- for (CompileCommand &CompileCommand : CompileCommandsForFile) {
- CompileCommands.push_back(
- std::make_pair(File, std::move(CompileCommand)));
- }
- } else {
- // FIXME: There are two use cases here: doing a fuzzy
- // "find . -name '*.cc' |xargs tool" match, where as a user I don't care
- // about the .cc files that were not found, and the use case where I
- // specify all files I want to run over explicitly, where this should
- // be an error. We'll want to add an option for this.
- llvm::errs() << "Skipping " << File << ". Compile command not found.\n";
- }
- }
+ : Compilations(Compilations), SourcePaths(SourcePaths),
+ Files(new FileManager(FileSystemOptions())), DiagConsumer(nullptr) {
+ appendArgumentsAdjuster(getClangStripOutputAdjuster());
+ appendArgumentsAdjuster(getClangSyntaxOnlyAdjuster());
}
-void ClangTool::setDiagnosticConsumer(DiagnosticConsumer *D) {
- DiagConsumer = D;
-}
+ClangTool::~ClangTool() {}
void ClangTool::mapVirtualFile(StringRef FilePath, StringRef Content) {
MappedFileContents.push_back(std::make_pair(FilePath, Content));
}
-void ClangTool::setArgumentsAdjuster(ArgumentsAdjuster *Adjuster) {
- clearArgumentsAdjusters();
- appendArgumentsAdjuster(Adjuster);
-}
-
-void ClangTool::appendArgumentsAdjuster(ArgumentsAdjuster *Adjuster) {
- ArgsAdjusters.push_back(Adjuster);
+void ClangTool::appendArgumentsAdjuster(ArgumentsAdjuster Adjuster) {
+ if (ArgsAdjuster)
+ ArgsAdjuster = combineAdjusters(ArgsAdjuster, Adjuster);
+ else
+ ArgsAdjuster = Adjuster;
}
void ClangTool::clearArgumentsAdjusters() {
- for (unsigned I = 0, E = ArgsAdjusters.size(); I != E; ++I)
- delete ArgsAdjusters[I];
- ArgsAdjusters.clear();
+ ArgsAdjuster = nullptr;
}
int ClangTool::run(ToolAction *Action) {
@@ -330,37 +312,65 @@ int ClangTool::run(ToolAction *Action) {
std::string MainExecutable =
llvm::sys::fs::getMainExecutable("clang_tool", &StaticSymbol);
+ llvm::SmallString<128> InitialDirectory;
+ if (std::error_code EC = llvm::sys::fs::current_path(InitialDirectory))
+ llvm::report_fatal_error("Cannot detect current path: " +
+ Twine(EC.message()));
bool ProcessingFailed = false;
- for (const auto &Command : CompileCommands) {
- // FIXME: chdir is thread hostile; on the other hand, creating the same
- // behavior as chdir is complex: chdir resolves the path once, thus
- // guaranteeing that all subsequent relative path operations work
- // on the same path the original chdir resulted in. This makes a difference
- // for example on network filesystems, where symlinks might be switched
- // during runtime of the tool. Fixing this depends on having a file system
- // abstraction that allows openat() style interactions.
- if (chdir(Command.second.Directory.c_str()))
- llvm::report_fatal_error("Cannot chdir into \"" +
- Twine(Command.second.Directory) + "\n!");
- std::vector<std::string> CommandLine = Command.second.CommandLine;
- for (ArgumentsAdjuster *Adjuster : ArgsAdjusters)
- CommandLine = Adjuster->Adjust(CommandLine);
- assert(!CommandLine.empty());
- CommandLine[0] = MainExecutable;
- // FIXME: We need a callback mechanism for the tool writer to output a
- // customized message for each file.
- DEBUG({
- llvm::dbgs() << "Processing: " << Command.first << ".\n";
- });
- ToolInvocation Invocation(std::move(CommandLine), Action, Files.get());
- Invocation.setDiagnosticConsumer(DiagConsumer);
- for (const auto &MappedFile : MappedFileContents) {
- Invocation.mapVirtualFile(MappedFile.first, MappedFile.second);
+ for (const auto &SourcePath : SourcePaths) {
+ std::string File(getAbsolutePath(SourcePath));
+
+ // Currently implementations of CompilationDatabase::getCompileCommands can
+ // change the state of the file system (e.g. prepare generated headers), so
+ // this method needs to run right before we invoke the tool, as the next
+ // file may require a different (incompatible) state of the file system.
+ //
+ // FIXME: Make the compilation database interface more explicit about the
+ // requirements to the order of invocation of its members.
+ std::vector<CompileCommand> CompileCommandsForFile =
+ Compilations.getCompileCommands(File);
+ if (CompileCommandsForFile.empty()) {
+ // FIXME: There are two use cases here: doing a fuzzy
+ // "find . -name '*.cc' |xargs tool" match, where as a user I don't care
+ // about the .cc files that were not found, and the use case where I
+ // specify all files I want to run over explicitly, where this should
+ // be an error. We'll want to add an option for this.
+ llvm::errs() << "Skipping " << File << ". Compile command not found.\n";
+ continue;
}
- if (!Invocation.run()) {
- // FIXME: Diagnostics should be used instead.
- llvm::errs() << "Error while processing " << Command.first << ".\n";
- ProcessingFailed = true;
+ for (CompileCommand &CompileCommand : CompileCommandsForFile) {
+ // FIXME: chdir is thread hostile; on the other hand, creating the same
+ // behavior as chdir is complex: chdir resolves the path once, thus
+ // guaranteeing that all subsequent relative path operations work
+ // on the same path the original chdir resulted in. This makes a
+ // difference for example on network filesystems, where symlinks might be
+ // switched during runtime of the tool. Fixing this depends on having a
+ // file system abstraction that allows openat() style interactions.
+ if (chdir(CompileCommand.Directory.c_str()))
+ llvm::report_fatal_error("Cannot chdir into \"" +
+ Twine(CompileCommand.Directory) + "\n!");
+ std::vector<std::string> CommandLine = CompileCommand.CommandLine;
+ if (ArgsAdjuster)
+ CommandLine = ArgsAdjuster(CommandLine);
+ assert(!CommandLine.empty());
+ CommandLine[0] = MainExecutable;
+ // FIXME: We need a callback mechanism for the tool writer to output a
+ // customized message for each file.
+ DEBUG({ llvm::dbgs() << "Processing: " << File << ".\n"; });
+ ToolInvocation Invocation(std::move(CommandLine), Action, Files.get());
+ Invocation.setDiagnosticConsumer(DiagConsumer);
+ for (const auto &MappedFile : MappedFileContents)
+ Invocation.mapVirtualFile(MappedFile.first, MappedFile.second);
+ if (!Invocation.run()) {
+ // FIXME: Diagnostics should be used instead.
+ llvm::errs() << "Error while processing " << File << ".\n";
+ ProcessingFailed = true;
+ }
+ // Return to the initial directory to correctly resolve next file by
+ // relative path.
+ if (chdir(InitialDirectory.c_str()))
+ llvm::report_fatal_error("Cannot chdir into \"" +
+ Twine(InitialDirectory) + "\n!");
}
}
return ProcessingFailed ? 1 : 0;
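
The new FileContentMappings parameter of runToolOnCodeWithArgs lets callers map extra virtual files next to the main snippet. A usage sketch (the header path and contents are made up; passing a newly allocated action follows the existing ownership idiom of this API):

    #include "clang/Frontend/FrontendActions.h"
    #include "clang/Tooling/Tooling.h"
    #include <string>
    #include <utility>
    #include <vector>

    using namespace clang::tooling;

    int main() {
      // Map a header into the invocation's virtual files so the snippet can
      // include it without touching the real file system.
      FileContentMappings VirtualFiles;
      VirtualFiles.push_back(
          std::make_pair("/virtual/header.h", "int fromHeader();"));

      bool Ok = runToolOnCodeWithArgs(
          new clang::SyntaxOnlyAction,
          "#include \"/virtual/header.h\"\nint main() { return fromHeader(); }",
          std::vector<std::string>(), "input.cc", VirtualFiles);
      return Ok ? 0 : 1;
    }
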